When scraping, how do I fix the Python error "ConnectionError: Max retries exceeded with url"?

import requests
from requests.adapters import HTTPAdapter

s = requests.Session()
# Close the connection after each request instead of keeping it alive
s.headers['Connection'] = 'close'
# Retry failed connections up to 5 times (the default is 0)
adapter = HTTPAdapter(max_retries=5)
s.mount('http://', adapter)
s.mount('https://', adapter)

url = 'https://s.m.taobao.com/search?m=api4h5&nick=%E4%BC%98%E8%A1%A3%E5%BA%93%E5%AE%98%E6%96%B9%E6%97%97%E8%88%B0%E5%BA%97&n=40&page=1&qq-pf-to=pcqq.c2c'
datas = s.get(url).text
print(datas)
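
In practice, "Max retries exceeded with url" tends to show up when the connection pool keeps reusing sockets the server has already dropped, or when the target host throttles rapid requests. If closing keep-alive connections is not enough, a retry policy with exponential backoff plus an explicit timeout is often more robust. The following is a minimal sketch, not from the original post, assuming urllib3's Retry class (bundled with requests) and the same Taobao search URL as above:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Retry up to 5 times with exponential backoff (0.5 s, 1 s, 2 s, ...),
# also retrying on transient HTTP status codes such as 429/503.
retry_policy = Retry(
    total=5,
    backoff_factor=0.5,
    status_forcelist=[429, 500, 502, 503, 504],
)

s = requests.Session()
adapter = HTTPAdapter(max_retries=retry_policy)
s.mount('http://', adapter)
s.mount('https://', adapter)

url = 'https://s.m.taobao.com/search?m=api4h5&nick=%E4%BC%98%E8%A1%A3%E5%BA%93%E5%AE%98%E6%96%B9%E6%97%97%E8%88%B0%E5%BA%97&n=40&page=1&qq-pf-to=pcqq.c2c'
resp = s.get(url, timeout=10)  # fail with a timeout instead of hanging on a dead connection
print(resp.text)

With backoff_factor=0.5 the wait between attempts grows roughly exponentially, which is usually enough to ride out brief rate limiting when crawling many pages.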


Reposted from blog.csdn.net/weixin_42341608/article/details/80737187