爬虫学习笔记2-urllib库的爬虫

'''
from urllib import request
from urllib import parse

# Basic GET request.  urllib's default User-Agent is rejected by many
# sites, so we attach a real browser UA before opening the URL.
url = 'https://www.lagou.com/jobs/list_python/p-city_0?&cl=false&fromSearch=true&labelWords=&suginput='
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'
}

list_request = request.Request(url, headers=headers)
list_response = request.urlopen(list_request)
# Response body arrives as bytes; decode to text before printing.
print(list_response.read().decode("UTF-8"))

# POST request against lagou's Ajax search endpoint.  The endpoint
# checks the Referer header, so we spoof both UA and referer; the form
# payload must be url-encoded and converted to bytes.
url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36',
    'referer': 'https://www.lagou.com/jobs/list_python/p-city_0?&cl=false&fromSearch=true&labelWords=&suginput='
}
form = {
    'first':'true',
    'pn':'1',
    'kd':'python'
}
payload = parse.urlencode(form).encode('utf-8')
req = request.Request(url, headers=headers, data=payload, method='POST')
resp = request.urlopen(req)
print(resp.read().decode('utf-8'))
# Header-spoofing demo (fake a browser's request headers)

# Baseline: fetch http://httpbin.org/ip WITHOUT a proxy, so the echoed
# address is our own public IP.
# Fix: the original built a ProxyHandler and opener here but then called
# request.urlopen(), which never uses that opener — the dead, misleading
# construction is removed (the proxied version below builds and actually
# uses its own opener via opener.open()).
url = 'http://httpbin.org/ip'
resp = request.urlopen(url)
print(resp.read())
# Without a proxy

# Proxied request: the same URL fetched through an HTTP proxy, so the
# echoed address should be the proxy's IP rather than ours.
# Free proxies can be found at https://www.kuaidaili.com/ops/ (short-lived).
ip_echo_url = 'http://httpbin.org/ip'
proxy_handler = request.ProxyHandler({"http":"120.83.104.41:9999"})
proxy_opener = request.build_opener(proxy_handler)
resp = proxy_opener.open(ip_echo_url)
print(resp.read())
# With a proxy

#代理的原理:在请求目的服务器之前,先请求代理服务器,然后让代理服务器去请求目的网站,代理服务器拿到目的网站的数据后再转发给我们的代码
#http://httpbin.org/ip这个网站可以查询到当前ip
#在代码中使用代理:
#使用urllib.request.ProxyHandler传入一个代理,这个代理是一个字典,字典的key依赖于代理服务器能够接受的类型(一般是http或者https),值是ip:port
#用这个handler和request.build_opener创建一个opener,再使用上一步创建的opener调用open函数发送请求

# Manual-cookie approach: paste a logged-in browser's entire Cookie
# header into the request so a login-protected page can be fetched.
# (Comment said "without cookie[jar]": no CookieJar is used here.)
url = 'http://www.renren.com/timelinefeedretrieve.do?ownerid=880151247&render=0&begin=0&limit=30&year=2020&month=1&isAdmin=false'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36',
    'Cookie':'anonymid=k9c7cuvupvfljn; depovince=GW; _r01_=1; taihe_bi_sdk_uid=2abd1e1b11255510735aded0d73d7f72; jebe_key=1328d954-1482-40d4-9eda-2c876d82959e%7C8bb73a413062d1aef63c9d49814fc0fa%7C1587612508161%7C1%7C1587612508488; _de=8319BFECB005814517E84409A2CA013B; ln_uact=18790838389; ln_hurl=http://head.xiaonei.com/photos/0/0/men_main.gif; JSESSIONID=abcdAL7exaCHi-LgwnQgx; ick_login=b8df0111-b95a-4d36-b436-75245c16734e; taihe_bi_sdk_session=c60b20b38ac339b91781e5e60e042cbd; jebecookies=77ddc65b-a7f4-4690-aea4-aab442321aeb|||||; p=8fcf452d7f3866bd764c9567f315b5eb7; first_login_flag=1; t=b394fda671476a217285c73d0b36df917; societyguester=b394fda671476a217285c73d0b36df917; id=974285767; xnsid=55c505fe; ver=7.0; loginfrom=null; wp_fold=0; jebe_key=1328d954-1482-40d4-9eda-2c876d82959e%7C8bb73a413062d1aef63c9d49814fc0fa%7C1587612508161%7C1%7C1587698927904'
}
req = request.Request(url,headers=headers)
resp = request.urlopen(req)
print(resp.read().decode("UTF-8"))
# Example: simulating a renren.com login with a hard-coded Cookie header
# decode() turns bytes into str; encode() turns str into bytes


# Dapeng (Dong Chengpeng) profile: http://www.renren.com/880151247/profile
# http.cookiejar module: let urllib manage cookies across requests.
from urllib import request
from urllib import parse
from http.cookiejar import CookieJar
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'
}
# 1. Log in
# 1.1. Create the cookie-jar object.
#      Fix: the original did `CookieJar = CookieJar()`, rebinding the
#      imported class name to an instance (shadowing bug) — use a
#      lowercase instance name instead.
cookiejar = CookieJar()
# 1.2. Wrap the jar in an HTTPCookieProcessor
handler = request.HTTPCookieProcessor(cookiejar)
# 1.3. Build an opener that records and re-sends cookies automatically
opener = request.build_opener(handler)
# 1.4. Send the login request (renren email and password)
data = {
    'email':'xxxxx',
    'password':'xxxxx'
}
login_url = 'http://www.renren.com/SysHome.do'
req = request.Request(login_url, data=parse.urlencode(data).encode('utf-8'), headers=headers)
opener.open(req)

# 2. Visit the profile page
# Reuse the SAME opener: it already holds the session cookies from login.
depeng_url = 'http://www.renren.com/880151247/profile'
resp = opener.open(depeng_url)
with open('renren.html', 'w', encoding='utf-8') as fp:
    fp.write(resp.read().decode('utf-8'))
# Automated-login demo

# Capture cookies from a live request and persist them to disk in
# Mozilla/Netscape cookies.txt format.
from urllib import request
from http.cookiejar import MozillaCookieJar

cookiejar = MozillaCookieJar('cookie.txt')
handler = request.HTTPCookieProcessor(cookiejar)
opener = request.build_opener(handler)

resp = opener.open('http://www.baidu.com/')
# Fix: a bare save() skips session ("discard") and expired cookies, and
# baidu's cookies are session cookies — the file came out empty. Keep
# them so the demo actually writes the cookies it just received.
cookiejar.save(ignore_discard=True, ignore_expires=True)

# Load previously saved cookies back from cookie.txt, attach them to a
# fresh opener, then dump every cookie the jar holds to the screen.
from urllib import request
from http.cookiejar import MozillaCookieJar

jar = MozillaCookieJar('cookie.txt')
jar.load(ignore_discard=True)  # also load session ("discard") cookies
cookie_opener = request.build_opener(request.HTTPCookieProcessor(jar))

resp = cookie_opener.open('http://httpbin.org/cookies')
for stored_cookie in jar:
    print(stored_cookie)
'''

猜你喜欢

转载自blog.csdn.net/qq_43568078/article/details/105733684