Python crawler: scraping the Maoyan Movies TOP100 chart

import requests
from requests.exceptions import RequestException
import re
import json
import time


# Append one parsed record to result.txt as a single JSON line;
# ensure_ascii=False keeps the Chinese text human-readable.
def write_to_file(content):
    with open('result.txt', 'a', encoding='UTF-8') as f:
        f.write(json.dumps(content, ensure_ascii=False)+'\n')


def parse_one_page(html):
    # Strip newlines and spaces so the non-greedy regex below stays compact.
    html = re.sub('\n| ', '', html)
    # One <dd> block per movie: rank, poster URL, title, cast, release date,
    # and a score split into an integer part and a fraction part.
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?title='
        '"(.*?)".*?star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)'
        '</i>.*?fraction.*?>(.*?)</i>.*?</dd>', re.S
    )
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2].strip(),
            'actor': item[3].strip()[3:] if len(item[3]) > 3 else '',  # drop the "主演：" label
            'time': item[4].strip()[5:] if len(item[4]) > 5 else '',  # drop the "上映时间：" label
            'score': item[5].strip() + item[6].strip()
        }


def get_one_page(url, proxy=None):
    try:
        headers = {
            'Cookie': '',  # left blank in the original post; fill in your own if Maoyan asks for verification
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/86.0.4209.2 Safari/537.36'
        }
        response = requests.get(url, headers=headers, proxies=proxy)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def get_proxy():
    # The proxy-pool API URL is left blank in the original post; point it at your own service.
    res = requests.get("")
    return res.text
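
# Note: the proxy-pool endpoint above is assumed (it is not shown in the original
# post) to return a single plain-text proxy address such as "1.2.3.4:8080"
# (illustrative value), which main() then wraps as {'http': ...} for requests.
# If you have no proxy pool, calling get_one_page(url) without a proxy also
# works for a crawl this small.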


def main(offset):
    proxy = {
        'http': get_proxy()
    }
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url, proxy=proxy)
    if html is None:
        return
    # with open('maoyan.html', 'w') as f:
    #     f.write(html)
    for item in parse_one_page(html):
        write_to_file(item)


if __name__ == '__main__':
    # The TOP100 board is paginated 10 movies per page: offset = 0, 10, ..., 90.
    for i in range(10):
        main(offset=i * 10)
        time.sleep(1)  # brief pause between page requests
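
Each run appends one JSON object per line to result.txt (a JSON Lines file), so repeated runs accumulate records. A minimal sketch, assuming the crawl above has already finished, for loading the results back into Python:

import json

with open('result.txt', encoding='UTF-8') as f:
    movies = [json.loads(line) for line in f if line.strip()]

print('crawled %d movies' % len(movies))
if movies:
    print(movies[0]['index'], movies[0]['title'], movies[0]['score'])

Delete or rename result.txt between runs if you want a fresh list instead of appended duplicates.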

Reposted from blog.csdn.net/hide_in_darkness/article/details/108272510