requests 爬虫示例:下载汽车之家(autohome)的图片;登录后给抽屉网点赞

下载autohome的图片

import requests
from bs4 import BeautifulSoup
import os,io,uuid

# Scrape the Autohome news page and save every article thumbnail
# under d:/python/auto with a random (uuid4) filename.
response = requests.get('http://www.autohome.com.cn/news', timeout=10)
response.encoding = response.apparent_encoding
soup = BeautifulSoup(response.text, features='html.parser')
target = soup.find(id='auto-channel-lazyload-article')
li_list = target.find_all('li')

# Create the output directory once, outside the loop. makedirs with
# exist_ok also creates missing parents (os.mkdir would fail on those).
save_dir = 'd:/python/auto'
os.makedirs(save_dir, exist_ok=True)

for li in li_list:
    a = li.find('a')
    if not a:
        continue  # some <li> entries are separators without a link
    img = a.find('img')
    if img is None:
        continue  # skip entries without a thumbnail (original crashed here)
    src = img.attrs.get('src', '')
    # src is usually protocol-relative ("//..."); only prefix when needed.
    img_url = src if src.startswith('http') else 'http:' + src
    img_response = requests.get(url=img_url, timeout=10)
    file_name = str(uuid.uuid4()) + '.jpg'
    with open(os.path.join(save_dir, file_name), 'wb') as f:
        f.write(img_response.content)

登陆后给抽屉网点赞

import requests,re
# Log in to dig.chouti.com, upvote one specific link, then upvote every
# article on page 2 of the "recent hot" list.
#
# Chouti's auth scheme: the anonymous 'gpsd' cookie obtained from the
# first GET is the one the /login POST authorizes, so all later requests
# reuse that same cookie rather than r2's cookies.
r1 = requests.get('http://dig.chouti.com/', timeout=10)
r1_cookies = r1.cookies.get_dict()

# NOTE(review): credentials are hard-coded; move to env vars / config.
post_dict = {
    "phone": '8613304652586',
    'password': 'nuominhe',
    'oneMonth': 1,
}
r2 = requests.post(
    url='http://dig.chouti.com/login',
    data=post_dict,
    cookies=r1_cookies,
    timeout=10,
)

# Only the 'gpsd' cookie is needed for authenticated requests; build the
# cookie dict once instead of repeating it at every call site.
auth_cookies = {'gpsd': r1_cookies.get('gpsd')}

# Upvote a single article by its link id.
r3 = requests.post(
    url='http://dig.chouti.com/link/vote?linksId=16156171',
    cookies=auth_cookies,
    timeout=10,
)

# Fetch page 2 of the hot list and upvote every article found on it.
r4 = requests.post(
    url='http://dig.chouti.com/all/hot/recent/2',
    cookies=auth_cookies,
    timeout=10,
)
html = r4.text
# Raw string so \d is a real regex escape (non-raw form emits a
# DeprecationWarning / SyntaxWarning on modern Python).
data1 = re.findall(r'id="newsContent(\d+)"', html)

# Iterate the ids directly instead of indexing via range(len(...)).
for link_id in data1:
    requests.post(
        url='http://dig.chouti.com/link/vote?linksId=' + link_id,
        cookies=auth_cookies,
        timeout=10,
    )
发布了14 篇原创文章 · 获赞 24 · 访问量 2万+

猜你喜欢

转载自blog.csdn.net/xxuffei/article/details/78904084