Python crawler code

The first script downloads meme images from https://www.doutula.com/photo/list/?page=1 with a multithreaded producer/consumer setup: producer threads parse the list pages into an image queue, and consumer threads download from it.

import os
import re
import threading
import time

import requests
from lxml import etree
from urllib import request
from queue import Queue, Empty
header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'
    }
class Producer(threading.Thread):

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            try:
                # non-blocking get: with many producer threads, a blocking get()
                # could hang forever if another thread grabs the last page first
                url = self.page_queue.get_nowait()
            except Empty:
                break
            self.parse_spider(url)


    def parse_spider(self, url):
        response = requests.get(url=url, headers=header).text
        html = etree.HTML(response)
        # skip the lazy-load placeholder gifs; the real image URL lives in data-original
        imgs = html.xpath("//div[@class='page-content text-center']//a[@class='col-xs-6 col-sm-3']//img[@class!='gif']")
        for img in imgs:
            img_url = img.get('data-original')
            alt = img.get('alt')
            alt = re.sub(r'[?？.,。！!\*]', '', alt)  # strip punctuation that is illegal or noisy in filenames
            suffix = os.path.splitext(img_url)[1]
            filename = alt + suffix
            self.img_queue.put((img_url, filename))

class Consumer(threading.Thread):
    def __init__(self,page_queue,img_queue,*args,**kwargs):
        super(Consumer,self).__init__(*args,**kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            if self.img_queue.empty() and self.page_queue.empty():
                break
            try:
                # timeout guards the gap between the empty() check and the get()
                img_url, filename = self.img_queue.get(timeout=10)
            except Empty:
                break
            request.urlretrieve(img_url, 'images/' + filename)
            print(filename + ' downloaded!')

def main():
    os.makedirs('images', exist_ok=True)  # urlretrieve fails if the target folder is missing
    page_queue = Queue(1000)
    img_queue = Queue(500000)
    for i in range(1,200):
        url = 'https://www.doutula.com/photo/list/?page=%d' %i
        page_queue.put(url)
        print(i)

    for x in range(100):
        t = Producer(page_queue, img_queue)
        t.start()
    print('='*20)
    time.sleep(10)  # wait 10s so the producers can fill the queues; with too little data the consumer threads below would exit immediately (once break fires, the thread ends)
    for x in range(100):
        t = Consumer(page_queue,img_queue)
        t.start()

if __name__ == '__main__':
    main()
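
The time.sleep(10) above is a fragile way to coordinate shutdown: if the producers are slow, the consumers can still exit early. A more robust pattern, sketched below as a standalone toy (the SENTINEL marker and the stand-in "parsing" are illustrative, not part of the original script), is to push a sentinel once all producers have finished:

import threading
from queue import Queue, Empty

SENTINEL = (None, None)  # illustrative end-of-work marker

def producer(page_queue, img_queue):
    while True:
        try:
            url = page_queue.get_nowait()
        except Empty:
            break
        img_queue.put((url, 'demo.jpg'))  # stand-in for the real page parsing

def consumer(img_queue):
    while True:
        item = img_queue.get()  # blocking get is safe: a sentinel always arrives
        if item == SENTINEL:
            img_queue.put(SENTINEL)  # re-post so sibling consumers also see it
            break
        print('would download %s -> %s' % item)

if __name__ == '__main__':
    page_queue, img_queue = Queue(), Queue()
    for i in range(5):
        page_queue.put('https://www.doutula.com/photo/list/?page=%d' % (i + 1))
    producers = [threading.Thread(target=producer, args=(page_queue, img_queue)) for _ in range(2)]
    consumers = [threading.Thread(target=consumer, args=(img_queue,)) for _ in range(2)]
    for t in producers + consumers:
        t.start()
    for t in producers:
        t.join()
    img_queue.put(SENTINEL)  # all producers done: tell the consumers to stop
    for t in consumers:
        t.join()

The second script switches targets: it posts to Lagou's positionAjax.json search endpoint, first visiting the ordinary search page with the same session so the cookies the endpoint expects are set: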
import requests

def request_list_page():
    url = "https://www.lagou.com/jobs/positionAjax.json?city=%E4%B8%8A%E6%B5%B7&needAddtionalResult=false"
    url_1 = "https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput="
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Host": "www.lagou.com",
        "Referer": "https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?labelWords=&fromSearch=true&suginput=",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
    }
    data = {
        "first": "true",
        "pn": "1",
        "kd": "python"
    }
    session = requests.Session()  # create a session to hold the cookies
    session.get(url=url_1, headers=headers)  # visit url_1 first so the session picks up the required cookies
    response = session.post(url=url, headers=headers, data=data)
    response.encoding = 'utf-8'
    print(response.json())

if __name__ == '__main__':
    request_list_page()
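
To fetch more than the first page, the same session can be reused while only the pn field changes; a sketch under the assumption that the endpoint and form fields behave exactly as above:

import time
import requests

def request_pages(kd='python', pages=3):
    list_url = "https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput="
    ajax_url = "https://www.lagou.com/jobs/positionAjax.json?city=%E4%B8%8A%E6%B5%B7&needAddtionalResult=false"
    headers = {
        "Referer": list_url,
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
    }
    session = requests.Session()
    session.get(list_url, headers=headers)  # pick up the cookies once
    results = []
    for pn in range(1, pages + 1):
        data = {"first": "true" if pn == 1 else "false", "pn": str(pn), "kd": kd}
        results.append(session.post(ajax_url, headers=headers, data=data).json())
        time.sleep(2)  # pause between pages; the endpoint rejects rapid-fire requests
    return results

The third script drives Chrome through Selenium instead, paging through the listings in a real browser and opening each posting's detail page: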


from selenium import webdriver  # note: the site's red-envelope promo popup still has to be dismissed (see the sketch after this script)
from lxml import etree
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

class LagouSpider(object):
    driver_path = r"C:\Users\top\Downloads\chromedriver_win32\chromedriver.exe"

    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=LagouSpider.driver_path)
        self.url = 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput='
        self.list_1 = list()
    def run(self):
        self.driver.get(self.url)
        while True:
            WebDriverWait(driver=self.driver, timeout=10).until(
                EC.presence_of_element_located((By.XPATH, "//div[@class='pager_container']/span[last()]"))
            )
            source = self.driver.page_source  # grab the page only after the pager has rendered
            self.parse_list_page(source)
            next_btn = self.driver.find_element_by_xpath("//div[@class='pager_container']/span[last()]")
            if "pager_container_disabled" in next_btn.get_attribute("class"):
                break
            else:
                next_btn.click()
                time.sleep(1)
        self.request_all_details(self.list_1)

    def request_all_details(self, list_1):
        # visit each collected detail link in turn
        for link in list_1:
            self.request_detail_page(link)
            time.sleep(1)

    def parse_list_page(self, source):
        html = etree.HTML(source)
        links = html.xpath("//a[@class='position_link']/@href")
        self.list_1.extend(links)  # extend keeps list_1 a flat list of URLs

    def request_detail_page(self,url):
        self.driver.get(url)
        source = self.driver.page_source
        self.parse_detail_page(source)

    def parse_detail_page(self,source):
        html = etree.HTML(source)
        position_name = html.xpath("//h1[@class='name']/text()")
        position_salary = html.xpath("//dd[@class='job_request']//span[@class='salary']/text()")
        print(position_name)
        print(position_salary)

if __name__ == '__main__':
    spider = LagouSpider()
    spider.run()
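
The comment at the top of this script notes that Lagou's red-envelope promo overlay has to be dismissed, or it swallows the click on the next-page button. A hedged sketch; the 'body-btn' class name is a guess at the popup's close button and may not match the live markup:

from selenium.common.exceptions import NoSuchElementException

def close_red_envelope(driver):
    try:
        driver.find_element_by_class_name('body-btn').click()  # 'body-btn' is assumed, not verified
    except NoSuchElementException:
        pass  # no popup appeared this session

Calling it right after self.driver.get(self.url) in run() would be the natural spot. Finally, a short pytesseract snippet that OCRs a screenshot using the simplified-Chinese language pack: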
import pytesseract
from PIL import Image

pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files (x86)\Tesseract-OCR\tesseract.exe"

image = Image.open(r'C:\Users\top\Pictures\Saved Pictures\QQ截图20200518100210.png')
text = pytesseract.image_to_string(image, lang='chi_sim')  # lang='chi_sim' selects the simplified-Chinese traineddata
print(text)
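
Accuracy on screenshots often improves with simple preprocessing before handing the image to Tesseract; a minimal sketch (the threshold of 150 is a starting guess to tune per image):

from PIL import Image
import pytesseract

def ocr_preprocessed(path, threshold=150):
    img = Image.open(path).convert('L')  # convert to grayscale
    img = img.point(lambda p: 255 if p > threshold else 0)  # binarize: white above threshold, black below
    return pytesseract.image_to_string(img, lang='chi_sim')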

Reposted from blog.csdn.net/weixin_45949073/article/details/106125563