# Selenium + Chrome scraper: logs in to Baidu Index and screenshots a keyword search.

import time

from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Chrome()

def baidu(browser):
    url = "http://index.baidu.com/"
    browser.get(url)
    browser.find_element_by_xpath('//*[@id="home"]/div[1]/div[2]/div[1]/div[4]/span/span').click()
    time.sleep(2) 
    username = 'XXX'
    passwd = 'YYY'
    browser.find_element_by_id('TANGRAM__PSP_4__userName').send_keys(username)
    browser.find_element_by_id('TANGRAM__PSP_4__password').send_keys(passwd)
    browser.find_element_by_id('TANGRAM__PSP_4__submit').click()    

def deal(browser):
    content = 'python'
    browser.find_element_by_xpath('//*[@id="search-input-form"]/input[3]').clear()
    browser.find_element_by_xpath('//*[@id="search-input-form"]/input[3]').send_keys(content)
    browser.find_element_by_xpath('//*[@id="home"]/div[2]/div[2]/div/div[1]/div/div[2]/div/span/span').click()
    browser.execute_script('window.scrollTo(0,400)')
    browser.save_screenshot('baidu.png')
    browser.close()

baidu(browser)
time.sleep(30)
deal(browser)

# ("猜你喜欢" / "You may also like" — blog-page boilerplate left over from the paste.)

# Reposted from blog.csdn.net/weixin_41752427/article/details/81052360