Bypassing an anti-crawler website with Selenium

#!/usr/bin/env python
# coding=utf-8
# Fetch a single teacher profile from the page's meta description
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

browser = webdriver.Firefox()

def is_visible(locator, timeout=10):
    """Return True once the element at the given XPath is visible, False on timeout."""
    try:
        WebDriverWait(browser, timeout).until(
            EC.visibility_of_element_located((By.XPATH, locator)))
        return True
    except TimeoutException:
        return False
    
browser.get("http://zssom.sysu.edu.cn/zh-hans/teacher/377")
is_visible('/html/body/div[2]/div[2]/div[1]')  # wait for the profile block to render
html = browser.page_source
content = BeautifulSoup(html, "lxml")
# The teacher's profile text is mirrored in the page's <meta name="description"> tag
description = content.find(attrs={"name": "description"})['content']
print(description)
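
If BeautifulSoup feels like overkill for a single tag, the same value can also be read straight through Selenium. A minimal sketch, assuming the find_element/By API already imported above; closing the driver afterwards is also good practice:

# Read the <meta name="description"> attribute directly from the live DOM
meta = browser.find_element(By.CSS_SELECTOR, 'meta[name="description"]')
print(meta.get_attribute("content"))
browser.quit()  # close Firefox once the value has been read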
# Below: a batch example for the Zhongshan School of Medicine (中山医) faculty pages.
# With sincere respect and best wishes to Sun Yat-sen University; suggestions are welcome.
# If anything here infringes, please contact me and the article will be taken down.

#!/usr/bin/env python
# coding=utf-8
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

browser = webdriver.Firefox()

def is_visible(locator, timeout=10):
    """Return True once the element at the given XPath is visible, False on timeout."""
    try:
        WebDriverWait(browser, timeout).until(
            EC.visibility_of_element_located((By.XPATH, locator)))
        return True
    except TimeoutException:
        return False
# Build the URL for each teacher id (351-381) and scrape the profile
base = "http://zssom.sysu.edu.cn/zh-hans/teacher/"
for i in range(351, 382):
    url = base + str(i)
    browser.get(url)

    if is_visible('/html/body/div[2]/div[2]/div[1]'):
        html = browser.page_source
        content = BeautifulSoup(html, "lxml")
        # The teacher's profile text lives in the <meta name="description"> tag
        meta = content.find(attrs={"name": "description"})
        if meta is not None:
            print(meta['content'])
        else:
            print("no description found: " + url)
    else:
        print("page content is empty: " + url)

print("over")
browser.quit()

Tips
1. Use Selenium to drive a real browser, so the page is rendered the same way a normal visit would render it.
2. Build each URL by concatenating the base path with the teacher id.
3. Use bs4 (BeautifulSoup) to pull node data out of the page source (see the sketch after this list).
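
Tip 3 is not limited to meta tags: once the rendered source is in hand, bs4 can extract text from any node. A small sketch; the HTML snippet and class names below are made up purely for illustration:

from bs4 import BeautifulSoup

# Made-up HTML standing in for a rendered teacher page
html = '<div class="teacher"><h3>Dr. Wang</h3><p class="intro">Research: immunology</p></div>'
soup = BeautifulSoup(html, "lxml")

name = soup.select_one("div.teacher h3").get_text(strip=True)    # CSS selector lookup
intro = soup.find("p", class_="intro").get_text(strip=True)      # tag + class lookup
print(name, "-", intro)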

Reposted from blog.csdn.net/qq_42676042/article/details/106939185