Wrapping request functions: getting past simple anti-scraping pages and fetching pages the simple way

from urllib import request,parse
from urllib.error import HTTPError,URLError
import json
from http import cookiejar

class session(object):

    def __init__(self):
        # CookieJar instance used to store cookies
        cookie_object = cookiejar.CookieJar()
        handler = request.HTTPCookieProcessor(cookie_object)
        # whenever a response carries cookies, the opener calls the
        # handler, which saves them into the CookieJar
        self.opener = request.build_opener(handler)

    # get/post have to be methods of the class (not nested inside
    # __init__), and the headers default must be None rather than an
    # undefined name
    def get(self, url, headers=None):
        return get(url, headers, self.opener)

    def post(self, url, form, headers=None):
        return post(url, form, headers, self.opener)
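
A minimal usage sketch for the session class (the URL and form fields are placeholders, not from the original article): because every request goes through the same cookie-aware opener, cookies set by one response are sent back automatically with the next one.

s = session()
# first request: any Set-Cookie headers are captured in the CookieJar
login_page = s.get('http://example.com/login')
# second request: the stored cookies are attached automatically, so a page
# that requires the login cookie can now be fetched
profile_page = s.post('http://example.com/login',
                      form={'username': 'demo', 'password': 'demo'})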

————————————————————————————————————


# GET wrapper
def get(url, headers=None, opener=None):
    return urlrequests(url, headers=headers, opener=opener)

# POST wrapper
def post(url, form, headers=None, opener=None):
    return urlrequests(url, form, headers=headers, opener=opener)
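
The headers parameter is what gets you past simple anti-scraping checks: a caller-supplied dict replaces the default one, so extra fields such as Referer can be sent when a site rejects the bare default client. A small sketch (the URL and Referer value are made-up placeholders):

custom_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
    # hypothetical Referer, some sites check it
    'Referer': 'http://example.com/',
}
html = get('http://example.com/page', headers=custom_headers)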


# 1. take the url (plus optional form data, headers and opener)
def urlrequests(url, form=None, headers=None, opener=None):
    # 2. default User-Agent
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'
    # 3. headers
    #    headers supplied by the caller replace the default ones
    if headers is None:
        # default request headers
        headers = {
            'User-Agent': user_agent
        }
    html = ''
    # 4. build the Request
    try:
        if form:
            # urlencode the form dict into a query string
            form_str = parse.urlencode(form)
            # encode the string to bytes
            form_bytes = form_str.encode('utf-8')
            # a Request with a body is sent as POST
            req = request.Request(url, headers=headers, data=form_bytes)
        else:
            # no body, plain GET
            req = request.Request(url, headers=headers)
        # 5. send the request
        if opener:
            # through the cookie-aware opener if one was supplied
            response = opener.open(req)
        else:
            # otherwise with plain urlopen
            response = request.urlopen(req)

        # 6. read and decode the response body
        html = response.read().decode('utf-8')

    except HTTPError as e:
        print(e)
    except URLError as e:
        print(e)

    return html

# example calls
if __name__ == '__main__':
    # url = 'http://www.baidu.com/'
    # res = urlrequests(url)
    # print(res)

    url = 'http://fanyi.baidu.com/sug'
    form = {
        'kw': 'nice'
    }
    # res = urlrequests(url, form=form)
    # print(res)
    # req = get(url)
    # print(req)

    req = post(url, form=form)
    # print(req)
    result = json.loads(req)['data'][0]['v']

    print(result)
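
Note that urlrequests() only prints the error and returns an empty string when the request fails, so json.loads() on the result would raise a ValueError. A defensive variant of the call above:

resp = post(url, form=form)
if resp:
    print(json.loads(resp)['data'][0]['v'])
else:
    print('request failed, nothing to parse')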

Reposted from blog.csdn.net/super_man_ing/article/details/81676265