(二)网络爬虫的盗亦有道
(1)网络爬虫的限制
- 来源审查:检查来访HTTP协议头的User-Agent域
- 发布公告:Robots协议 —— 网站根目录下的robots.txt文件
(三)Requests库网络爬取实战
(1)京东商品页面的爬取
import requests


def getHTMLText(url):
    """Fetch a product page and return the first 1000 characters of its HTML.

    Args:
        url: The product page URL to fetch.

    Returns:
        The first 1000 characters of the decoded response body, or the
        string "产生异常" if the request fails for any HTTP/network reason.
    """
    try:
        # Spoof a desktop browser User-Agent so the site does not block the crawler.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # The declared charset is often wrong; trust the content-sniffed one.
        r.encoding = r.apparent_encoding
        return r.text[:1000]
    except requests.RequestException:
        # Was a bare `except:`; narrowed so programming errors are not swallowed.
        return "产生异常"


if __name__ == "__main__":
    url = "https://item.jd.com/100004323294.html"
    print(getHTMLText(url))
(2)亚马逊商品页面的爬取
import requests


def getHTMLText(url):
    """Fetch an Amazon product page using a logged-in session cookie.

    Args:
        url: The product page URL to fetch.

    Returns:
        The full decoded response body, or the string "产生异常" if the
        request fails for any HTTP/network reason.
    """
    try:
        headers = {
            # Spoof a desktop browser User-Agent so Amazon does not block the crawler.
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
            # Fixed header name: the HTTP request header is 'Cookie' (singular);
            # the original 'Cookies' key would be sent as an unknown header and
            # the session cookie ignored by the server.
            'Cookie': 'session-id=460-2669091-3751610; i18n-prefs=CNY; ubid-acbcn=460-5518508-6686437; x-wl-uid=1ZNig4QusLPyi/2fMRt2+/B9UI77CSP/FURf3oOMMEU5Qkn5d5DWvk3ZFsfCwdw4gQUS8PQ6cQls=; session-token="Tsypc2KcjVgqDHIbYljZ3S6e6UpT8lE0Ep5iBlUEUMOR1c6UOTsT46LMslGbryJIDtKQi9eEPX3DDHl4GrcE39k7YvQKBZkkcJ7Iyz6WJo69+IsEl5RCj4I5lStPd8Aysjq91yFBZT7jviCBycWPKVz+Df2gI+6L5haArRakytUPoYW0t4wASl/nz4LpD8dYh9xlXuBViLQf7en5aVPOhxSU9h7IuM1MDO7wQLYrFiFrICD/rphjTw=="; session-id-time=2082729601l; csm-hit=tb:G7W4MG6KYD3V0Z4C2BPR+s-0AWC79VGM2DFRQZSHM5N|1586669357960&t:1586669357960&adb:adblk_no',
        }
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # The declared charset is often wrong; trust the content-sniffed one.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Was a bare `except:`; narrowed so programming errors are not swallowed.
        return "产生异常"


if __name__ == "__main__":
    url = "https://www.amazon.cn/dp/B07FQKB4TM"
    print(getHTMLText(url))
(3)百度/360搜索关键词提交
- 百度关键词接口:http://www.baidu.com/s?wd=keyword
import requests


def getHTMLSearch(url, keyword):
    """Submit a keyword to a search engine and return the result page HTML.

    Args:
        url: Base search endpoint (e.g. "http://www.baidu.com/s").
        keyword: The search term; sent as the `wd` query parameter
            (Baidu's keyword interface: http://www.baidu.com/s?wd=keyword).

    Returns:
        The full decoded response body, or the string "产生异常" if the
        request fails for any HTTP/network reason.
    """
    try:
        # Spoof a desktop browser User-Agent so the engine does not block the crawler.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        params = {'wd': keyword}  # Baidu expects the query under `wd`
        r = requests.get(url, headers=headers, params=params, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # The declared charset is often wrong; trust the content-sniffed one.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Was a bare `except:`; narrowed so programming errors are not swallowed.
        return "产生异常"


if __name__ == "__main__":
    url = "http://www.baidu.com/s"
    print(len(getHTMLSearch(url, keyword='Python')))
- 360关键词接口:http://www.so.com/s?q=keyword
import requests


def getHTMLSearch(url, keyword):
    """Submit a keyword to the 360 search engine and return the result page HTML.

    Args:
        url: Base search endpoint (e.g. "http://www.so.com/s").
        keyword: The search term; sent as the `q` query parameter
            (360's keyword interface: http://www.so.com/s?q=keyword).

    Returns:
        The full decoded response body, or the string "产生异常" if the
        request fails for any HTTP/network reason.
    """
    try:
        # Spoof a desktop browser User-Agent so the engine does not block the crawler.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        params = {'q': keyword}  # 360 expects the query under `q`
        r = requests.get(url, headers=headers, params=params, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # The declared charset is often wrong; trust the content-sniffed one.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Was a bare `except:`; narrowed so programming errors are not swallowed.
        return "产生异常"


if __name__ == "__main__":
    url = "http://www.so.com/s"
    print(len(getHTMLSearch(url, keyword='Python')))
(4)网络图片的爬取和存储
import requests
import os

# Download a single image to a local directory, skipping it if already saved.
url = "http://image.nationalgeographic.com.cn/2017/0211/20170211061910157.jpg"
root = "D://pics//"
path = root + url.split('/')[-1]  # keep the original filename from the URL
try:
    if not os.path.exists(root):
        os.mkdir(root)
    if not os.path.exists(path):
        # Added timeout so a stalled connection cannot hang the script forever.
        r = requests.get(url, timeout=30)
        # Added raise_for_status() so a 404/500 error page is not saved as a .jpg.
        r.raise_for_status()
        with open(path, 'wb') as f:
            f.write(r.content)
        # Removed the redundant f.close(): the `with` block already closes the file.
        print("pic save success")
    else:
        print("pic already exist")
except (requests.RequestException, OSError):
    # Was a bare `except:`; narrowed to network and filesystem failures.
    print("spider fail")
(5)IP地址归属地的自动查询
https://www.ip138.com/iplookup.asp?ip={ipaddress}&action=2
import requests


def getHTMLText(url):
    """Fetch an IP-lookup result page and return its HTML.

    Args:
        url: The fully-formed ip138 lookup URL, including the `ip` and
            `action` query parameters.

    Returns:
        The full decoded response body, or the string "产生异常" if the
        request fails for any HTTP/network reason.
    """
    try:
        # Spoof a desktop browser User-Agent so the site does not block the crawler.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        }
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # The declared charset is often wrong; trust the content-sniffed one.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Was a bare `except:`; narrowed so programming errors are not swallowed.
        return "产生异常"


if __name__ == "__main__":
    ip = '202.204.80.112'
    url = "https://www.ip138.com/iplookup.asp?ip={}&action=2".format(ip)
    print(getHTMLText(url))