Scraping mzitu.com: the key point is adding 'Referer':'http://www.mzitu.com/' to the request headers

import requests
import re
import time

# The key point: mzitu.com checks the Referer header (hotlink protection),
# so every request must claim to come from the site itself.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.33 Safari/537.36',
           'Referer': 'http://www.mzitu.com/'}

session = requests.session()
session.keep_alive = False  # do not reuse connections between requests

for a in range(1, 10):
    url = 'https://www.mzitu.com/149482/' + str(a)
    #url = 'https://www.mzitu.com/zipai/comment-page-' + str(1) + '/#comments'
    data = session.get(url, headers=headers).text
    # regular expression that captures the image URL on a gallery page
    #photo = r'<.*?class="lazy".*?src=".*?".*?data-original="(.*?)".*?width=.*?>'  # pattern for the zipai pages
    photo = r'<p>.*?<.*?src=.*?"(.*?)".*?alt=.*?width=.*?>'
    photo_url = re.findall(photo, data, re.S)  # pattern, source text, flags
    print(photo_url)
    time.sleep(2)
    for i, b in enumerate(photo_url):
        # each image request must carry the gallery page as its Referer;
        # the original defined this dict but mistakenly passed the outer
        # headers instead, so the fix is to actually use it here
        header = {'User-Agent': headers['User-Agent'], 'Referer': url}
        response = session.get(b, headers=header)
        print(response)
        print('Downloading image %s-%s' % (a, i))
        # include the inner index in the file name, otherwise every image
        # found on the same page would overwrite the previous one
        with open('{}_{}.jpg'.format(a, i), 'wb') as f:
            f.write(response.content)
        time.sleep(2)
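
What the pattern extracts is easier to see on a tiny sample. A minimal sketch with a made-up HTML fragment shaped like a gallery page (the tag layout and the image URL below are assumptions for illustration, not copied from the real site):

import re

html = '''<p><a href="https://www.mzitu.com/149482/2">
<img src="https://i.meizitu.net/2018/08/example01.jpg" alt="demo" width="728" height="1093"></a></p>'''

photo = r'<p>.*?<.*?src=.*?"(.*?)".*?alt=.*?width=.*?>'
# re.S lets . match newlines too, so the pattern can span line breaks in the page source
print(re.findall(photo, html, re.S))
# ['https://i.meizitu.net/2018/08/example01.jpg']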

Why add the Referer header: when making a request, it tells the site which page you came from.
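
The effect is easy to verify with two bare requests. A minimal sketch, assuming a hypothetical image URL and assuming the server answers 403 to requests without a site Referer, which is how this kind of hotlink protection typically behaves:

import requests

ua = {'User-Agent': 'Mozilla/5.0'}
img = 'https://i.meizitu.net/2018/08/example01.jpg'  # hypothetical image URL

# Without a Referer, hotlink protection typically rejects the request (e.g. 403).
print(requests.get(img, headers=ua).status_code)

# With a Referer pointing back at the site, the image is served.
print(requests.get(img, headers={**ua, 'Referer': 'http://www.mzitu.com/'}).status_code)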

Reposted from www.cnblogs.com/ilovelh/p/10382657.html