# Crawler -- multi-process scrape of images from mzitu.com
# (original title: 爬虫--多进程爬取妹子图)
#
# 版权声明: 本文为博主原创文章, 未经博主允许不得转载。
# Source: https://blog.csdn.net/MR_HJY/article/details/81879880
import requests
from lxml import etree
import os
import multiprocessing
from multiprocessing import Queue, Pool

# 下载图片
def download_img(img_url_referer_url):
    """Download one image into the local ``download/`` directory.

    Args:
        img_url_referer_url: A ``(img_url, referer)`` tuple. The referer is
            sent as a request header — presumably needed to get past the
            image host's hotlink protection (the original code sets it too).

    Side effects: creates ``download/`` if missing and writes the image
    bytes to a file named after the last path segment of the URL.
    """
    (img_url, referer) = img_url_referer_url
    headers = {
        'referer': referer,
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    }
    # makedirs(exist_ok=True) replaces the original exists()/mkdir() pair,
    # which had a check-then-create race between pool workers.
    os.makedirs('download', exist_ok=True)
    filename = os.path.join('download', img_url.split('/')[-1])
    # Timeout so a stalled connection cannot hang a pool worker forever.
    response = requests.get(img_url, headers=headers, timeout=30)
    with open(filename, 'wb') as f:
        f.write(response.content)

def pares_detailed_page(url_href, queue):
    """Walk every page of one gallery and enqueue its image URLs.

    Args:
        url_href: URL of a gallery's first page.
        queue: ``multiprocessing.Queue``; receives ``(img_url, referer)``
            tuples that ``download_img`` consumes.
    """
    # Timeouts throughout: without them one stalled request hangs the
    # producer process indefinitely.
    response = requests.get(url_href, timeout=30)
    html_element = etree.HTML(response.text)
    # Second-to-last <span> in the pagination bar is taken as the page
    # count — NOTE(review): inferred from the original's [-2] index;
    # confirm against the site's markup.
    max_page = html_element.xpath('//div[@class="pagenavi"]/a/span/text()')[-2]
    for page_number in range(1, int(max_page) + 1):
        page_url = url_href + '/' + str(page_number)
        response = requests.get(page_url, timeout=30)
        html_element = etree.HTML(response.text)
        img_url = html_element.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
        # The gallery URL doubles as the referer the image host expects.
        queue.put((img_url, url_href))


# 图片的获取
def get_all_image_url(queue):
    """Producer: scrape the index page and enqueue all gallery images.

    Args:
        queue: ``multiprocessing.Queue`` fed with ``(img_url, referer)``
            tuples via ``pares_detailed_page``.
    """
    url = 'http://www.mzitu.com/'
    # Timeout so the producer cannot hang on a dead connection; the stray
    # debug print(queue) from the original is removed.
    response = requests.get(url, timeout=30)
    html_element = etree.HTML(response.text)
    # Each <li><a href> under #pins links to one gallery's first page.
    href_list = html_element.xpath('//ul[@id="pins"]/li/a/@href')
    for href in href_list:
        pares_detailed_page(href, queue)



if __name__ == '__main__':
    q = Queue()
    # Producer process: crawls the site and feeds (img_url, referer)
    # tuples into the queue.
    p = multiprocessing.Process(target=get_all_image_url, args=(q,))
    p.start()

    # Consumer side: 5 pool workers download images pulled off the queue.
    download_pool = Pool(5)
    # NOTE(review): hard-coded cap of 200 downloads; the producer is never
    # told to stop, so it may keep crawling after this loop ends.
    for _ in range(200):
        image_url_referer_url = q.get()
        # BUG FIX: apply_async's second argument must be a TUPLE of
        # positional args. The original passed (image_url_referer_url) —
        # the pair itself — so download_img was called with two positional
        # arguments and every task raised TypeError, silently swallowed by
        # apply_async. The trailing comma makes it a 1-tuple, passing the
        # pair as the single expected argument.
        download_pool.apply_async(download_img, (image_url_referer_url,))

    download_pool.close()
    download_pool.join()
    p.join()





# --- blog-scrape footer (kept as comments so the file parses) ---
# 猜你喜欢
# 转载自 blog.csdn.net/MR_HJY/article/details/81879880
# 今日推荐