Python Crawlers----Basic Usage of Scrapy

         1. Create a new project

                scrapy startproject douban

                Change into douban/spiders and generate the spider skeleton:

                scrapy genspider douban_spider movie.douban.com
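
                genspider creates spiders/douban_spider.py with name, allowed_domains, and a start_urls stub already filled in from the arguments; the actual parsing logic is written by hand in step (3) below.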

         2. For convenience, do the rest of the work visually in PyCharm

                The project layout is as follows:
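
                (The original screenshot is not preserved; this is the typical layout reconstructed from the steps below, with main.py being the launcher added in step (1).)

douban/
    scrapy.cfg
    main.py
    douban/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            douban_spider.py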

                (1) Create main.py so the spider does not have to be launched from the command line every time

main.py

# Launcher script: saves typing the start command in a shell each time

from scrapy import cmdline
cmdline.execute('scrapy crawl douban_spider'.split())
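
                Run main.py from the project root (the directory containing scrapy.cfg) so Scrapy can locate the project settings; cmdline.execute behaves exactly like typing the same command in a shell.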

                (2) Define the data model (item fields) in items.py

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy

# Declare exactly what to scrape: movie rank, title, star rating, description, and so on


class DoubandemoItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    serial_num = scrapy.Field()  # rank
    movie_name = scrapy.Field()  # movie title
    introduce = scrapy.Field()   # brief introduction
    star = scrapy.Field()        # star rating
    evaluation = scrapy.Field()  # rating count
    description = scrapy.Field()  # short description

                (3) Write the parsing rules in spiders/xxx_spider.py (the custom spider)

douban_spider.py

# -*- coding: utf-8 -*-
import scrapy
from douban.items import DoubandemoItem


# This class holds the XPath expressions (and any regexes) that extract the data


class DoubanSpiderSpider(scrapy.Spider):
    name = 'douban_spider'  # spider name
    allowed_domains = ['movie.douban.com']  # domains the spider is allowed to crawl
    start_urls = ['https://movie.douban.com/top250']  # entry URL, handed to the scheduler

    # default callback for responses to start_urls

    def parse(self, response):
        movie_list = response.xpath("//div[@class='article']//ol[@class='grid_view']//li")
        for item in movie_list:
            # 导入文件
            douban_item = DoubandemoItem()
            # 写详细的xpath来进行解析
            douban_item['serial_num'] = item.xpath(".//div[@class='item']//em/text()").extract_first()
            douban_item['movie_name'] = item.xpath(".//div[@class='info']//div[@class='hd']/a/span[1]/text()").extract_first()
            content = item.xpath(".//div[@class='info']//div[@class='bd']/p[1]/text()").extract()
            #douban_item['introduce'] = content
            for i_content in content:
                content_s = "".join(i_content.split())
                douban_item['introduce'] = content_s

            douban_item['star'] = item.xpath(".//div[@class='info']//div[@class='star']/span[2]/text()").extract_first()
            douban_item['evaluation'] = item.xpath(".//div[@class='info']//div[@class='star']/span[4]/text()").extract_first()
            douban_item['description'] = item.xpath(".//div[@class='info']//span[@class='inq']/text()").extract_first()
            yield douban_item  # 将数据传送到管道

        # parse the link to the next page

        next_link = response.xpath("//div[@class='article']//div[@class='paginator']//span[@class='next']/link/@href").extract()  # relative URL of the next page
        if next_link:
            next_link = next_link[0]
            # hand the new request back to the scheduler
            yield scrapy.Request("https://movie.douban.com/top250"+next_link, callback=self.parse)


                (4) Now run the spider to try fetching data. The requests may be rejected at first; in that case set the USER_AGENT field of the request headers.

                         Add the field in settings.py:

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'
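
                If requests are still being filtered, disabling robots.txt compliance in settings.py can also help (a standard Scrapy setting; whether to use it is your own call, and it is not part of the original post):

ROBOTSTXT_OBEY = False  # Scrapy honors robots.txt by default in new projects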

                (5) Storing the data

                       1.  The scraped data can be exported to a .json or .csv file, like so:

scrapy crawl douban_spider -o test.json
scrapy crawl douban_spider -o test.csv
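
                With Chinese text, the JSON export defaults to ASCII escape sequences; setting the feed export encoding in settings.py keeps the output readable (a standard Scrapy setting, added here as a tip):

FEED_EXPORT_ENCODING = 'utf-8'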

                       2.  The scraped data can also be saved to MongoDB:

                            A. First define the MongoDB variables in settings.py:

mongo_host = '127.0.0.1'  # database IP address
mongo_port = 27017  # database port
mongo_db_name = 'douban'  # database name
mongo_db_collection = 'douban_movie'  # collection (table) name
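
                            Note that Scrapy itself only recognizes UPPERCASE settings, so these lowercase names are ordinary module-level variables; that is why the pipeline below imports them directly from douban.settings instead of reading them through the settings API.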

                            B. Write the storage code in pipelines.py:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymongo
from douban.settings import mongo_host,mongo_port,mongo_db_name,mongo_db_collection


class DoubanPipeline(object):
    def __init__(self):  # constructor: set up the MongoDB connection once
        host = mongo_host
        port = mongo_port
        db_name = mongo_db_name
        db_collection = mongo_db_collection
        client = pymongo.MongoClient(host=host, port=port)  # connect to MongoDB
        my_db = client[db_name]  # select the database
        self.post = my_db[db_collection]  # select the collection; documents can now be inserted

    def process_item(self, item, spider):
        # write one document to MongoDB
        data = dict(item)  # convert the item to a plain dict
        self.post.insert_one(data)  # insert_one() replaces the deprecated insert()
        return item

                            C. Enable the pipeline in settings.py:

ITEM_PIPELINES = {
    'douban.pipelines.DoubanPipeline': 300,
}
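
                The value 300 is the pipeline's priority: pipelines run in ascending order of this number (valid range 0-1000), which matters once several pipelines are enabled.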

                (6) Writing the IP middleware

                         The point is to disguise the crawler so that the target site's firewall does not detect it. Two disguises are used: setting a proxy IP and setting a random USER-AGENT.

                         Proxy IP: write the middleware class in middlewares.py, then enable it in settings.py:

# middlewares.py
import base64


class my_proxy(object):  # proxy-IP middleware
    def process_request(self, request, spider):
        # proxy server address and port; Scrapy expects a full URL including the scheme
        request.meta['proxy'] = 'http://http-cla.abuyun.com:9030'
        proxy_name_pass = b'username:password'  # proxy username and password
        encode_name_pass = base64.b64encode(proxy_name_pass)  # base64-encode for Basic auth
        request.headers['Proxy-Authorization'] = 'Basic ' + encode_name_pass.decode()

                         Then enable it in settings.py:

DOWNLOADER_MIDDLEWARES = {
    'douban.middlewares.my_proxy': 543,
  # 'douban.middlewares.DoubanDownloaderMiddleware': 543,
}

                         Random USER-AGENT: likewise, write the class in middlewares.py and enable it in settings.py:

# middlewares.py
import random


class my_useragent(object):  # pick a random User-Agent as a disguise
    def process_request(self, request, spider):
        # pool of User-Agent strings; lists like this are easy to find online
        user_agents = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
            "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
        ]
        agent = random.choice(user_agents)  # pick one at random
        request.headers['User-Agent'] = agent  # set the standard User-Agent header

                         Then enable it in settings.py:

DOWNLOADER_MIDDLEWARES = {
   #  'douban.middlewares.my_proxy': 543,
   # 'douban.middlewares.DoubanDownloaderMiddleware': 543,
   'douban.middlewares.my_useragent': 543,
}
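
                         A process_request that returns None, as both classes here do, tells Scrapy to keep processing the request with the modified meta and headers; the middleware does not need to return anything.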

                         If the run log lists the class among the enabled downloader middlewares, the middleware is active:

2019-05-04 23:55:12 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
 'douban.middlewares.my_useragent',

                (7) Notes

                         # A middleware does nothing until it is enabled in settings.py.

                         # The spider module's file name must not be the same as the project name (a spiders/douban.py would shadow the douban package and break imports such as from douban.items import ...), and no two spiders may share the same name.


Reposted from blog.csdn.net/weixin_39071173/article/details/89716790