使用 selenium 模块跳过用户名、密码、验证码的输入:加载浏览器已有的标签和 cookie,对多页动态加载(JS 渲染)的数据进行翻页爬取。

思路:登录一次后无需二次登录即可抓取动态加载数据——把网页保存的 cookie 和标签加载到 selenium 自动化测试浏览器中即可复用登录态。

 1 from selenium import webdriver
 2 import re
 3 from time import sleep
 4 
 5 def willbill():
 6     '''点击翻页,获取下一页数据,进行正则匹配'''
 7     driver.find_element_by_xpath('//*[@id="upPage"]/li[5]/a').click()
 8     sleep(3)
 9     data = driver.page_source
10     results_list = 'class="limingcentUrlpic">(.*?)</a>&nbsp;'
11     res_list = re.findall(results_list, data, re.S)
12     all_willbill.append(res_list)
13     return all_willbill
14 
# Load the existing Chrome profile (tabs + cookies) so the saved login
# session is reused and no second username/password/captcha login is needed.
all_willbill = []
profile_directory = r'--user-data-dir=C:\Users\Administrator\AppData\Local\Google\Chrome\User Data'
option = webdriver.ChromeOptions()
option.add_argument(profile_directory)
driver = webdriver.Chrome(options=option)
url = 'https://www.dianxiaomi.com/order/index.htm'
driver.get(url)
sleep(3)  # fixed waits throughout; site is JS-rendered
# Click through to the order-list view and apply the filter.
driver.find_element_by_xpath('//*[@id="m105"]').click()
sleep(2)
driver.find_element_by_xpath('//*[@id="onlyShowRemove"]').click()
sleep(3)
# Scrape page 1 inline; willbill() paginates through the rest.
data = driver.page_source
results_list = 'class="limingcentUrlpic">(.*?)</a>&nbsp;'
res_list = re.findall(results_list, data, re.S)
print(res_list)
all_willbill.append(res_list)
for _ in range(108):  # hard-coded remaining page count — adjust per order volume
    willbill()
print(all_willbill)
# Save all collected order numbers to an Excel workbook.
def save_excel(records=None, filename='all_willbill.xlsx'):
    """Flatten the per-page lists of order numbers and write them to an
    .xlsx file in the current working directory.

    Parameters
    ----------
    records : list[list[str]] | None
        Pages of order numbers; defaults to the module-level
        ``all_willbill`` accumulator.
    filename : str
        Name of the output workbook.
    """
    import os
    import pandas as pd
    if records is None:
        records = all_willbill
    # Flatten the 2-D page structure into a single column of order numbers.
    flat_orders = [order for page in records for order in page]
    frame = pd.DataFrame(data=flat_orders, columns=['订单号'])
    frame.to_excel(os.path.join(os.getcwd(), filename))
    print('保存订单号成功!')
save_excel()

转载自 www.cnblogs.com/chunfang/p/12801613.html