Python crawler example: multiple coroutines with gevent

Writing a Python crawler with the gevent module and its queue module can greatly improve crawling speed. When crawling multiple pages, an ordinary crawler visits them one after another in sequence, like waiting for one dish to finish cooking before starting the next. With coroutines, several crawl tasks run concurrently, like cooking rice while stir-frying a dish, two steps at once, so the crawler is naturally faster.
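
To get a feel for the pattern before the full crawler, here is a minimal sketch. The URLs are placeholders chosen purely for illustration; only the gevent usage matters. Because of the monkey patch, each requests.get yields to the other coroutines while it waits on the network, so the two fetches overlap instead of running back to back.

from gevent import monkey
monkey.patch_all()  # patch blocking I/O so requests cooperates with gevent

import gevent
import requests

def fetch(url):
    # while this coroutine waits for the response, the others get to run
    res = requests.get(url)
    print(url, res.status_code)

# placeholder URLs for illustration only; the real crawler below targets mtime.com
urls = ['https://httpbin.org/delay/1', 'https://httpbin.org/delay/2']
tasks = [gevent.spawn(fetch, u) for u in urls]
gevent.joinall(tasks)  # finishes in roughly the time of the slowest request, not the sum
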
Without further ado, here is the full crawler code:

from gevent import monkey
monkey.patch_all()
# With the monkey patch applied, the program below can run multiple coroutines

import requests,gevent,csv
from gevent.queue import Queue
from bs4 import BeautifulSoup

# Put all the URLs into one list:
url_list=[]
for i in range(1,11):
    url='http://www.mtime.com/top/tv/top100/index-'+str(i)+'.html'
    url_list.append(url)
# The first page's URL has a different format, so add it separately
url_0='http://www.mtime.com/top/tv/top100/'
url_list.append(url_0)

headers={
    # the original post left the value blank; put your own browser's User-Agent string here
    'User-Agent': 'Mozilla/5.0'
}

# Open the CSV file and write the header row (movie title, director, cast, synopsis)
csv_file=open('时光网电影列表.csv','a+',newline='',encoding='utf-8')
writer=csv.writer(csv_file)
file_head=['电影名称','导演','主演','简介']
writer.writerow(file_head)

# Extract the title, director, cast and synopsis from each movie block and write a CSV row
def parse_movies(movies):
    for movie in movies:
        title=movie.find('h2',class_="px14 pb6").find('a').text
        acts=movie.find_all('p')
        try:
            director=acts[0].text
        except IndexError:
            director='none'

        try:
            actor=acts[1].text
        except IndexError:
            actor='none'

        try:
            brief=movie.find('p',class_="mt3").text
        except AttributeError:
            brief='none'
        writer.writerow([title,director,actor,brief])

# Put all the URLs into the queue (the "no need to wait" room):
work=Queue()
for url in url_list:
    work.put_nowait(url)

# Crawler: keep taking URLs from the queue until it is empty
def crawler():
    while not work.empty():
        url=work.get_nowait()
        res=requests.get(url,headers=headers)
        soup=BeautifulSoup(res.text,'html.parser')
        movies=soup.find_all('div',class_="mov_con")
        parse_movies(movies)
        print(url,work.qsize(),res.status_code)

# Create the coroutine tasks; two are enough, since more might be too heavy for the target server
# (see the Pool variant after the code for another way to cap concurrency)
tasks_list=[]
for x in range(2):
    task=gevent.spawn(crawler)
    tasks_list.append(task)

gevent.joinall(tasks_list)
csv_file.close()
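
As an aside that is not in the original post, gevent also ships a Pool class that caps concurrency for you: instead of a queue plus a fixed number of crawler coroutines, you spawn one coroutine per URL and let the pool decide how many run at once. A rough sketch, reusing headers, url_list and parse_movies from the code above and replacing the queue/spawn/joinall section (keep csv_file.close() at the very end):

from gevent.pool import Pool

pool=Pool(2)  # at most two requests in flight, to stay polite to the server

def crawl_one(url):
    # fetch and parse a single page; the pool schedules when this coroutine may run
    res=requests.get(url,headers=headers)
    soup=BeautifulSoup(res.text,'html.parser')
    parse_movies(soup.find_all('div',class_="mov_con"))
    print(url,res.status_code)

pool.map(crawl_one,url_list)  # blocks until every URL has been processed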
