Web Crawlers (4): Performance

Synchronous calls, asynchronous calls, and callbacks

1. Synchronous calls

import requests


def parse_page(res):
    print('parsed %s' % len(res))


def get_page(url):
    """获取response.text"""
    print('下载 %s' % url)
    response = requests.get(url)
    if response.status_code == 200:
        return response.text


urls = ['https://www.baidu.com/', 'http://www.sina.com.cn/',
        'https://www.python.org']

for url in urls:
    # each call blocks in place until the task finishes and returns its result,
    # and only then does execution move on
    res = get_page(url)
    parse_page(res)

Problem: after submitting a task we wait in place until it finishes and we have its result before executing the next line of code. This is inefficient.

2. Multithreading

import requests
from threading import Thread,current_thread
import time

def parse_page(res):
    print('%s parsed %s' % (current_thread().getName(), len(res)))


def get_page(url, callback=parse_page):
    print('%s downloading %s' % (current_thread().getName(), url))
    response=requests.get(url)
    if response.status_code == 200:
        callback(response.text)


if __name__ == '__main__':
    urls=['https://www.baidu.com/','http://www.sina.com.cn/','https://www.python.org']
    start_time = time.time()
    t_list = []
    for url in urls:
        t=Thread(target=get_page,args=(url,))
        t.start()
        t_list.append(t)
    for t in t_list:
        t.join()
    end_time = time.time()
    print('elapsed: %s' % (end_time - start_time))

Problem: we cannot open an unlimited number of processes or threads. When hundreds or thousands of connections must be served at once, both multiprocessing and multithreading seriously tie up system resources and degrade responsiveness, and the threads and processes themselves are also more prone to hanging.

3. Thread pool

import requests
from threading import current_thread
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor


def parse_page(res):
    # the callback receives a Future; .result() retrieves get_page's return value
    res = res.result()
    print('%s parsed %s' % (current_thread().getName(), len(res)))


def get_page(url):
    print('%s downloading %s' % (current_thread().getName(), url))
    response = requests.get(url)
    if response.status_code == 200:
        return response.text


if __name__ == '__main__':
    urls = ['https://www.baidu.com/', 'http://www.sina.com.cn/', 'https://www.python.org']

    pool = ThreadPoolExecutor(50)
    # pool=ProcessPoolExecutor(50)
    for url in urls:
        pool.submit(get_page, url).add_done_callback(parse_page)

    pool.shutdown(wait=True)
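
For comparison, a minimal sketch (not from the original article) using executor.map, assuming the same get_page and urls defined above: map submits the calls and returns the results in submission order rather than via callbacks.

with ThreadPoolExecutor(50) as pool:
    # results come back in the same order the urls were submitted
    for url, text in zip(urls, pool.map(get_page, urls)):
        print('%s -> %s bytes' % (url, len(text or '')))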

Problem: "thread pools" and "connection pools" only mitigate, to a degree, the resource cost of frequent IO calls. Moreover, a "pool" always has an upper bound; once requests greatly exceed it, a pooled system responds to the outside world scarcely better than one without a pool. So when using a "pool" you must consider the scale of traffic it will face and size the pool accordingly.


High performance

None of the approaches above actually solves the underlying performance problem: IO blocking. Whether we use multiple processes or multiple threads, whenever a task blocks on IO the operating system forcibly takes the CPU away from it, and the program's execution efficiency drops.

The key to solving this is to detect IO blocking at the application level ourselves and switch to another task within our own program. That keeps our program's visible IO to a minimum and keeps more of it in the ready state, which in effect "fools" the operating system into treating our program as one that does little IO, so it grants us as much CPU time as possible. This is how execution efficiency is improved.
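
As a toy illustration (not from the original article) of what "detect the blocking point ourselves and switch" means, a plain generator can yield wherever it would otherwise block, and a simple round-robin loop can drive several such tasks in a single thread:

def task(name, steps):
    for i in range(steps):
        print('%s step %s' % (name, i))
        yield  # pretend this is the point where IO would block


tasks = [task('task1', 2), task('task2', 2)]
while tasks:
    for t in tasks[:]:
        try:
            next(t)              # resume the task until its next "blocking" point
        except StopIteration:
            tasks.remove(t)      # the task has finished; drop it from the rotation

This is the idea that asyncio, gevent, and Twisted below implement for real, with an event loop doing the switching at actual IO boundaries.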

1. asyncio with hand-built HTTP headers

asyncio basics: single-threaded concurrency at the user level.

import asyncio


@asyncio.coroutine
def task(task_id, seconds):
    print('%s is starting' % task_id)
    # asyncio can only detect network IO; when it hits an IO point it switches
    # to another task, saving the current state so it can resume here later
    yield from asyncio.sleep(seconds)
    print('%s is done' % task_id)


tasks = [task(task_id="task1", seconds=3),
         task("task2", 3),
         task(task_id="task3", seconds=3)]

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
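
Note that the @asyncio.coroutine decorator and generator-based yield from style shown above were removed in Python 3.11; on current interpreters the same example would be written with async/await, roughly like this (a sketch, not from the original article):

import asyncio


async def task(task_id, seconds):
    print('%s is starting' % task_id)
    await asyncio.sleep(seconds)  # yield control to the event loop while "IO" is pending
    print('%s is done' % task_id)


async def main():
    await asyncio.gather(task('task1', 3), task('task2', 3), task('task3', 3))

asyncio.run(main())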

However, asyncio by itself only issues TCP-level requests; it does not speak HTTP. So when we want to send an HTTP request this way, we have to build the HTTP headers ourselves.

import asyncio
import uuid  # universally unique identifiers, used to name the saved html files
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'


def parse_page(host, res):
    print('%s parsed %s bytes' % (host, len(res)))
    with open('%s.html' % uuid.uuid1(), 'wb') as f:
        f.write(res)


@asyncio.coroutine
def get_page(host, port=80, url='/', callback=parse_page, ssl=False):
    print('downloading http://%s:%s%s' % (host, port, url))

    # Step 1 (blocking IO): open the TCP connection; this blocks, hence yield from
    if ssl:
        port = 443
    recv, send = yield from asyncio.open_connection(host=host, port=port, ssl=ssl)

    # Step 2: build the HTTP request by hand, because asyncio itself can only
    # assemble and send TCP packets, not HTTP
    request_headers = """GET %s HTTP/1.0\r\nHost: %s\r\nUser-agent: %s\r\n\r\n""" % (url, host, user_agent)
    # request_headers = """POST %s HTTP/1.0\r\nHost: %s\r\n\r\nname=egon&password=123""" % (url, host)
    request_headers = request_headers.encode('utf-8')

    # Step 3 (blocking IO): send the HTTP request
    send.write(request_headers)
    yield from send.drain()

    # Step 4 (blocking IO): read the response headers
    while True:
        line = yield from recv.readline()
        if line == b'\r\n':
            break
        print('%s Response headers: %s' % (host, line))

    # Step 5 (blocking IO): read the response body
    text = yield from recv.read()

    # Step 6: run the callback
    callback(host, text)

    # Step 7: close the socket. There is no recv.close(): just as in the four-way
    # TCP teardown, once one end finishes sending and calls send.close(), the other
    # end is closed passively.
    send.close()


if __name__ == '__main__':
    tasks=[
        get_page('www.baidu.com',url='/s?wd=美女',ssl=True),
        get_page('www.cnblogs.com',url='/',ssl=True),
    ]

    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(tasks))
    loop.close()

2. asyncio + aiohttp

import aiohttp
import asyncio

@asyncio.coroutine
def get_page(url):
    print('GET:%s' %url)
    # use aiohttp to open the connection and issue the GET request
    response = yield from aiohttp.request('GET', url)

    # read the response body
    data = yield from response.read()

    print(url,data)
    response.close()
    return 1

tasks=[
    get_page('https://www.python.org/doc'),
    get_page('https://www.cnblogs.com/linhaifeng'),
    get_page('https://www.openstack.org')
]

loop=asyncio.get_event_loop()
results=loop.run_until_complete(asyncio.gather(*tasks))
loop.close()

print('=====>',results) #[1, 1, 1]

This raises an error!

Traceback (most recent call last):
  File "C:/Users/Administrator/PycharmProjects/test1/test1.py", line 21, in <module>
    results=loop.run_until_complete(asyncio.gather(*tasks))
  File "c:\users\administrator\appdata\local\programs\python\python36-32\Lib\asyncio\base_events.py", line 468, in run_until_complete
    return future.result()
  File "C:/Users/Administrator/PycharmProjects/test1/test1.py", line 7, in get_page
    response=yield from aiohttp.request('GET',url)
TypeError: '_SessionRequestContextManager' object is not iterable
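
The traceback comes from running the old yield from style against a newer aiohttp release, where aiohttp.request() returns an async context manager rather than something that can be iterated. A rough fix (a sketch, assuming aiohttp 3.x and Python 3.7+) uses ClientSession with async/await:

import aiohttp
import asyncio


async def get_page(url):
    print('GET:%s' % url)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            data = await response.read()  # read the response body
    print(url, len(data))
    return 1


async def main():
    return await asyncio.gather(
        get_page('https://www.python.org/doc'),
        get_page('https://www.cnblogs.com/linhaifeng'),
        get_page('https://www.openstack.org'),
    )

print('=====>', asyncio.run(main()))  # [1, 1, 1]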

3. asyncio + requests (key approach)

import requests
import asyncio

@asyncio.coroutine
def get_page(func, *args):
    print('GET:%s' % args[0])
    loop = asyncio.get_event_loop()

    # hand the blocking requests.get straight to the loop's thread pool; the function
    # itself already handles connecting, sending the request, and reading the response
    future = loop.run_in_executor(None, func, *args)

    response = yield from future

    print(response.url, len(response.text))
    return 1

tasks=[
    get_page(requests.get,'https://www.python.org/doc'),
    get_page(requests.get,'https://www.cnblogs.com/linhaifeng'),
    get_page(requests.get,'https://www.openstack.org')
]


loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*tasks))
loop.close()

print('=====>',results)
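
On Python 3.9+ the same trick can be written with asyncio.to_thread, which also pushes the blocking call into a thread pool; a minimal sketch (not from the original article):

import asyncio
import requests


async def get_page(url):
    print('GET:%s' % url)
    # run the blocking requests.get in the default thread pool and await the result
    response = await asyncio.to_thread(requests.get, url)
    print(response.url, len(response.text))
    return 1


async def main():
    return await asyncio.gather(
        get_page('https://www.python.org/doc'),
        get_page('https://www.cnblogs.com/linhaifeng'),
        get_page('https://www.openstack.org'),
    )

print('=====>', asyncio.run(main()))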

4. gevent coroutines (key approach)

from gevent import monkey; monkey.patch_all()  # must patch blocking calls before importing requests
import gevent
import requests

def get_page(url):
    print('GET:%s' %url)
    response=requests.get(url)
    print(url,len(response.text))
    return 1

g1=gevent.spawn(get_page,'https://www.python.org/doc')
g2=gevent.spawn(get_page,'https://www.cnblogs.com/linhaifeng')
g3=gevent.spawn(get_page,'https://www.openstack.org')
gevent.joinall([g1,g2,g3,])
print(g1.value, g2.value, g3.value)  # collect the return values

4.1. gevent coroutine pool (key approach)

from gevent import monkey; monkey.patch_all()  # must patch blocking calls before importing requests
import gevent
import requests
from gevent.pool import Pool


def get_page(url):
    print('GET:%s' % url)
    response = requests.get(url)
    print(url, len(response.text))
    return 1


pool = Pool(2)
g1 = pool.spawn(get_page, 'https://www.python.org/doc')
g2 = pool.spawn(get_page, 'https://www.cnblogs.com/linhaifeng')
g3 = pool.spawn(get_page, 'https://www.openstack.org')
gevent.joinall([g1, g2, g3, ])
print(g1.value, g2.value, g3.value)  # collect the return values
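
When only the return values matter, Pool also offers a map interface; a short sketch (assuming the get_page defined above), where Pool.map blocks until every greenlet finishes and returns the results in submission order:

urls = [
    'https://www.python.org/doc',
    'https://www.cnblogs.com/linhaifeng',
    'https://www.openstack.org',
]
print(Pool(2).map(get_page, urls))  # [1, 1, 1]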

5. Twisted

Twisted is a networking framework; one of its features is sending asynchronous requests, detecting IO and switching automatically.
Installation:

'''
# Problem 1: error: Microsoft Visual C++ 14.0 is required. Get it with "Microsoft Visual C++ Build Tools": http://landinghub.visualstudio.com/visual-cpp-build-tools
# Download a prebuilt wheel from https://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted
pip3 install C:\Users\Administrator\Downloads\Twisted-17.9.0-cp36-cp36m-win_amd64.whl
# (the win32 build is recommended)
pip3 install twisted


# Problem 2: ModuleNotFoundError: No module named 'win32api'
# install pywin32 from https://sourceforge.net/projects/pywin32/files/pywin32/


# Problem 3: openssl
pip3 install pyopenssl
'''
from twisted.web.client import getPage,defer
from twisted.internet import reactor

def all_done(arg):
    # print(arg)
    reactor.stop()

def callback(res):
    print(res)
    return 1

defer_list=[]

urls=[
    'http://www.baidu.com',
    'http://www.bing.com',
    'https://www.python.org',
]

for url in urls:
    obj = getPage(url.encode('utf-8'))
    obj.addCallback(callback)
    defer_list.append(obj)

defer.DeferredList(defer_list).addBoth(all_done)
reactor.run()

# detailed usage of Twisted's getPage
from twisted.internet import reactor
from twisted.web.client import getPage
import urllib.parse


def one_done(arg):
    print(arg)
    reactor.stop()

post_data = urllib.parse.urlencode({'check_data': 'adf'})
post_data = bytes(post_data, encoding='utf8')
headers = {b'Content-Type': b'application/x-www-form-urlencoded'}
response = getPage(bytes('http://dig.chouti.com/login', encoding='utf8'),
                   method=bytes('POST', encoding='utf8'),
                   postdata=post_data,
                   cookies={},
                   headers=headers)
response.addBoth(one_done)

reactor.run()
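
Note that getPage has been deprecated in newer Twisted releases in favor of twisted.web.client.Agent (or the third-party treq). A rough GET with Agent, as a sketch rather than the article's code, looks like this:

from twisted.internet import reactor
from twisted.web.client import Agent, readBody


def on_body(body):
    print(len(body))
    reactor.stop()


def on_response(response):
    # readBody collects the whole response body into a single Deferred
    return readBody(response).addCallback(on_body)


def on_error(failure):
    print(failure)
    reactor.stop()


agent = Agent(reactor)
d = agent.request(b'GET', b'https://www.python.org')
d.addCallback(on_response)
d.addErrback(on_error)
reactor.run()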

Reposted from www.cnblogs.com/fqh202/p/9479378.html