Using Python 3 and the Scrapy framework to crawl the titles, body text, and other details of the policies & regulations and industry information articles published by the National Grain and Oil Information Center (国家粮油信息中心)

1. Create the Scrapy project

scrapy startproject Grain

2. Change into the project directory and create a Spider with the genspider command

scrapy genspider grain grainoil.com.cn
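
After these two commands the project layout should look roughly like this (the exact files vary slightly between Scrapy versions):

Grain/
    scrapy.cfg
    Grain/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            grain.py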

3. Define the data to scrape (items.py)

# -*- coding: utf-8 -*-
import scrapy

class GrainItem(scrapy.Item):
    # local directory where the article will be saved
    news_path = scrapy.Field()

    # article category
    news_cate = scrapy.Field()
    # article title
    news_title = scrapy.Field()
    # article publication date
    news_date = scrapy.Field()
    # article source
    news_source = scrapy.Field()
    # article summary (导读)
    news_guide = scrapy.Field()
    # article body (HTML)
    news_content = scrapy.Field()
    # article URL
    news_url = scrapy.Field()
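
Items behave like Python dicts; a quick illustrative check from a Python shell, run from the project root so the Grain package is importable:

from Grain.items import GrainItem

item = GrainItem()
item['news_title'] = '测试标题'
print(item['news_title'])   # 测试标题
print(dict(item))           # {'news_title': '测试标题'}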

4. Write the Spider that extracts the item data (spiders/grain.py)

# -*- coding: utf-8 -*-
# Crawl the policies & regulations and industry information articles (titles, body text, etc.) from the National Grain and Oil Information Center
import scrapy
import re
from Grain.items import GrainItem
import requests
import os
from bs4 import BeautifulSoup
import datetime

class GrainSpider(scrapy.Spider):
    name = 'grain'
    allowed_domains = ['grainoil.com.cn']
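    # Two list pages: 1471 is the industry information (产业信息) channel,
    # 18 is the policies & regulations (政策法规) channel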
    start_urls = ['http://www.grainoil.com.cn/newsListHome/1471.jspx',
                 'http://www.grainoil.com.cn/newsListChannel/18.jspx'
                  ]
    def parse(self, response):
        items = []
        # Get the next-page links; each onclick looks like: location.href=encodeURI('18_1.jspx');
        next_url_list = response.xpath('//div[@class="container"]/div[@class="m_listpage"]/div[2]/div[@class="page-in"]/a/@onclick').extract()
        # Use a regex to pull '18_1' out of '18_1.jspx' and rebuild the full URL; the underscore part is optional so plain '18.jspx' also matches
        patt = re.compile(r"location\.href=encodeURI\('(\d+_?\d+)\.jspx'\);")
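        # For reference, the regex captures just the numeric id from the onclick value, e.g.
        #   "location.href=encodeURI('18_1.jspx');"   -> "18_1"
        #   "location.href=encodeURI('1471_3.jspx');" -> "1471_3"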
        for i in range(len(next_url_list)):
            next_url = ''
            # result is e.g. '18_1'
            result = patt.search(next_url_list[i]).group(1)
            # ids starting with 18 are next pages of the policies & regulations channel
            if result[:2] == '18':
                next_url = "http://www.grainoil.com.cn/newsListChannel/" + result + ".jspx"
            # ids starting with 1471 are next pages of the industry information channel
            elif result[:4] == "1471":
                next_url = "http://www.grainoil.com.cn/newsListHome/" + result + ".jspx"
            # only follow the link if it matched one of the two channels
            if next_url:
                yield scrapy.Request(url=next_url, callback=self.parse)
        # Get the link of each article on the list page
        news_url = response.xpath('//div[@class="container"]/div[@class="m_listpage"]/div[2]/ol/li/a/@href').extract()
        # The extracted article URLs contain ':80'; stripping it is optional
        for i in range(len(news_url)):
            item = GrainItem()
            # variant that strips the ':80' part:
            # item['news_url'] = 'http://www.grainoil.com.cn' + news_url[i][29:]
            item['news_url'] = news_url[i]
            items.append(item)
        for item in items:
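            # Carry the partially-filled item along via meta so parse_news can finish filling it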
            yield scrapy.Request(url = item['news_url'],meta={'meta_1':item},callback=self.parse_news)
    def parse_news(self,response):
        item = GrainItem()
        # Retrieve the meta data carried over from parse
        meta_1 = response.meta['meta_1']
        try:
            # Get the article title (it comes with surrounding whitespace)
            news_title = response.xpath('//div[@class="container"]/div[@class="m_details"]/div[@class="m_tit"]/h2/text()').extract()[0].strip()
            item['news_title'] = news_title
            # Build the article summary (导读)
            html = requests.get(meta_1['news_url'])
            soup = BeautifulSoup(html.text,'lxml')
            # Collect the text of every <p> tag as the candidate article content
            content_list = []
            for i in soup.select("p"):
                content_list.append(i.get_text())
            # The first two entries of content_list are the publish time and the font-size widget, so skip them
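            # i.e. content_list is roughly [publish time, font-size widget, paragraph 1, paragraph 2, ..., six copyright lines]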
            if len(content_list[2]) != 0:
                # content_list_2 holds the paragraph texts with leading/trailing whitespace stripped
                content_list_2 = []
                # Strip whitespace from every paragraph, e.g. http://www.grainoil.com.cn/ChannelPolicies/90735.jhtml
                # The last six entries are copyright boilerplate; drop them so short articles do not pull them into the summary
                for para in content_list[2:-6]:
                    content_list_2.append(para.strip())
                # If the article starts with an image, the text begins with '#TRS_AUTOADD'; use the title as the summary instead
                if content_list_2[0][:12] != '#TRS_AUTOADD':
                    # Join the paragraphs, strip spaces/newlines/full-width spaces, and cap the summary at 100 characters
                    content_join = "".join(content_list_2).replace('\n','').replace(" ","").replace('\u3000','').strip()
                    if len(content_join) >= 100:
                        news_guide = content_join[:100] + "......"
                    else:
                        news_guide = content_join + "......"
                else:
                    news_guide = news_title
            else:
                if len(content_list[3]) != 0:
                    if content_list[3][:11] != '中国粮食信息网版权所有':
                        content_list_3 = []
                        # The last six entries are copyright boilerplate; drop them for the same reason as above
                        for para in content_list[3:-6]:
                            content_list_3.append(para.strip())
                        # Cap the summary at 100 characters
                        content_join = "".join(content_list_3).replace('\n','').replace(" ","").replace('\u3000','').strip()
                        if len(content_join) >= 100:
                            news_guide = content_join[:100] + "......"
                        else:
                            news_guide = content_join + "......"
                    # The body may be a table rather than <p> tags; then the only <p> text is the copyright notice,
                    # so use the title as the summary, e.g. http://www.grainoil.com.cn/ChannelPolicies/90729.jhtml
                    else:
                        news_guide = news_title
                # If content_list[2] and [3] are both empty the article is probably a single image; use the title as the summary
                # e.g. http://www.grainoil.com.cn/ChannelPolicies/90799.jhtml
                else:
                    news_guide = news_title
            item['news_guide'] = news_guide
            # Get the article source
            news_source = response.xpath('//div[@class="container"]/div[@class="m_details"]/div[@class="m_tit"]/p[1]/a/text()').extract()[0].strip()
            if len(news_source) != 0:
                if news_source != '本站原创':
                    item['news_source'] = news_source
                else:
                    item['news_source'] = '国家粮油信息中心'
            else:
                item['news_source'] = '国家粮油信息中心'
            # Get the publish date; the raw text looks like '>>发布时间:2018-06-27 09:26:35  来源:'
            news_date_list = response.xpath('//div[@class="container"]/div[@class="m_details"]/div[@class="m_tit"]/p[1]/text()').extract()[0]
            news_date = news_date_list.split(":")[1].split(" ")[0]
            item['news_date'] = news_date
            # Extract the article body HTML with a regex (the page is fetched a second time here)
            html = requests.get(meta_1['news_url'])
            # patt = re.compile(r'<div class="m_detailsbox" id="size">[\s\S]*?</div>')
            patt = re.compile(r'(<div class="m_detailsbox" id="size">[\s\S]*?)<div class="m_detailsbox" style="padding-top')
            result = patt.findall(html.text)
            news_content = "".join(result)
            # Some articles use 'oldsrc=' (sometimes uppercase 'OLDSRC='), which stops images from showing; strip it
            # e.g. http://www.grainoil.com.cn/ChannelPolicies/90284.jhtml
            # Some image paths start with src="/NewsDoc and need the host prepended
            # e.g. http://www.grainoil.com.cn/ChannelPolicies/13743.jhtml
            # Some articles end with a stray ']]>'; remove it
            # e.g. http://www.grainoil.com.cn/ChannelPolicies/15469.jhtml
            news_content = news_content.replace('src="/NewsDoc', 'src="http://www.grainoil.com.cn/NewsDoc')
            news_content = news_content.replace('OLDSRC=', '').replace('oldsrc=', '')
            news_content = news_content.replace("]]>", "").replace('href="javascript:void(0)"', '')
            # Swap the fonts used on the site for 微软雅黑 (Microsoft YaHei)
            news_content = news_content.replace('宋体', '微软雅黑').replace('仿宋', '微软雅黑').replace('Courier New', '微软雅黑')
            item['news_content'] = news_content
            item['news_url'] = meta_1['news_url']
            # Work out the category from the breadcrumb
            cate = response.xpath('//div[@class="container"]/div[@class="m_crumbs"]/text()|//div[@class="container"]/div[@class="m_crumbs "]/text()').extract()[1].strip()
            if cate == '政策法规':
                item['news_cate'] = cate
                # Characters that are not allowed in directory names must be replaced
                char_list = ['*', '|', ':', '?', '/', '<', '>', '"', '\\']
                news_title_result = news_title
                for i in char_list:
                    if i in news_title_result:
                        news_title_result = news_title_result.replace(i, "_")
                news_path = './Data/' + cate + "/" + news_date + "/" + news_title_result
                # Create the directory if it does not exist yet
                if not os.path.exists(news_path):
                    os.makedirs(news_path)
                item['news_path'] = news_path
                print("Processing: %s" % news_path[7:])
            elif cate == '生产气象' or cate == '综合信息':
                item['news_cate'] = '产业信息'
                # Characters that are not allowed in directory names must be replaced
                char_list = ['*', '|', ':', '?', '/', '<', '>', '"', '\\']
                news_title_result = news_title
                for i in char_list:
                    if i in news_title_result:
                        news_title_result = news_title_result.replace(i, "_")
                news_path = './Data/产业信息/' + news_date + "/" + news_title_result
                # Create the directory if it does not exist yet
                if not os.path.exists(news_path):
                    os.makedirs(news_path)
                item['news_path'] = news_path
                print("Processing: %s" % news_path[7:])
            # d1 = datetime.datetime.strptime('2018-08-01', '%Y-%m-%d')
            # d2 = datetime.datetime.strptime(item['news_date'], '%Y-%m-%d')
            # if d2 > d1:
            #     print("%s is later than 2018-08-01, processing..." % item['news_date'])
            yield item
        # Some articles on this site are members-only and cannot be viewed
        # (roughly from http://www.grainoil.com.cn/newsListChannel/18_63.jspx onwards); any parse failure above also lands here
        except Exception:
            print("Members-only article or parse failure, skipped:", meta_1['news_url'])

5. Write the pipeline that saves the data to local files (pipelines.py)

# -*- coding: utf-8 -*-
import json
# A json.JSONEncoder subclass that converts bytes to str while encoding
class MyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, bytes):
            return str(o, encoding='utf-8')
        return json.JSONEncoder.default(self, o)
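
# Note: MyEncoder is not referenced by GrainPipeline below; it is only needed if you
# serialise items to JSON yourself, e.g. json.dumps(dict(item), cls=MyEncoder, ensure_ascii=False)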


class GrainPipeline(object):
    def process_item(self, item, spider):
        try:
            file_name = item['news_title']
            file_name_result = file_name
            # Characters that are not allowed in file names must be replaced
            char_list = ['*', '|', ':', '?', '/', '<', '>', '"', '\\']
            for i in char_list:
                if i in file_name_result:
                    file_name_result = file_name_result.replace(i, "_")
            # Write the article HTML into the directory created by the spider; UTF-8 so it renders consistently
            with open(item['news_path'] + "/" + file_name_result + ".html", "w+", encoding="utf-8") as f:
                f.write(item['news_content'])
        except Exception as e:
            print(e)
            print("[ERROR] Failed to save the file for %s!" % item['news_title'])
        return item

    def close_spider(self, spider):
        print("数据保存本地处理完毕,谢谢使用!")

6. Add a Grainpipelines.py file to also save the data into a MySQL database

# -*- coding: utf-8 -*-
import json
import pymysql
import datetime

# A json.JSONEncoder subclass that converts bytes to str while encoding
class MyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, bytes):
            return str(o, encoding='utf-8')
        return json.JSONEncoder.default(self, o)


class DBPipeline(object):
    def __init__(self):
        # Connect to the database
        self.connect = pymysql.connect(
            host='localhost',
            port=3306,
            db='python3',
            user='root',
            passwd='123456',
            charset='utf8',
            use_unicode=True)
        # Use a cursor for inserts and queries
        self.cursor = self.connect.cursor()
        # Counter for how many rows get written
        self.count = 0

    # @classmethod
    # def from_settings(cls, settings):
    #     dbargs = dict(
    #         host=settings['MYSQL_HOST'],
    #         db=settings['MYSQL_DBNAME'],
    #         user=settings['MYSQL_USER'],
    #         passwd=settings['MYSQL_PASSWD'],
    #         port=settings['MYSQL_PORT'],
    #         charset='utf8',
    #         cursorclass=pymysql.cursors.DictCursor,
    #         use_unicode=True,
    # )
    # dbpool = adbapi.ConnectionPool('pymysql', **dbargs)
    # return cls(dbpool)

    # def __init__(self,dbpool):
    #     self.dbpool=dbpool
    def process_item(self, item, spider):
        try:
            # De-duplication: has this URL been stored already?
            self.cursor.execute(
                """SELECT news_url FROM grainoil WHERE news_url = %s""", (item['news_url'],))
            repetition = self.cursor.fetchone()
            # Duplicate: skip it
            if repetition:
                print("Row already in the database, skipping. URL:", repetition[0])
            else:
                # d1 = datetime.datetime.strptime('2018-08-01', '%Y-%m-%d')
                # d2 = datetime.datetime.strptime(item['news_date'], '%Y-%m-%d')
                # Optionally write only part of the data, e.g.:
                # if d2 > d1:
                #     print("%s is later than 2018-08-01, writing to the database..." % item['news_date'])
                print("Writing to the database...")
                # Insert the row
                self.cursor.execute(
                    """INSERT INTO grainoil(news_cate, news_title, news_date, news_source, news_guide,
                      news_content, news_url) VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                    (item['news_cate'], item['news_title'], item['news_date'], item['news_source'],
                     item['news_guide'], item['news_content'], item['news_url']))
                self.count += 1
            # Commit the statement
            self.connect.commit()
        except Exception as error:
            # Log the error
            print("Database error while saving the item:", error)
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.connect.close()
        print("数据库处理完毕,本次共计增加%d条数据,谢谢使用!" % self.count)

7. Configure settings.py (for me, loading the database connection through these settings failed and is still unresolved)

# Obey robots.txt rules; for what this means see: https://blog.csdn.net/z564359805/article/details/80691677
ROBOTSTXT_OBEY = False 
 
# # Settings for saving the data to MySQL
# MYSQL_HOST = 'localhost'
# MYSQL_DBNAME = 'python3'
# MYSQL_USER = 'root'
# MYSQL_PASSWD = '123456'
# MYSQL_PORT = 3306
 
 
# Download delay (seconds)
DOWNLOAD_DELAY = 4 
# Override the default request headers: add a User-Agent
DEFAULT_REQUEST_HEADERS = {      
  'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);',      
  # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',      
  # 'Accept-Language': 'en',      
}  
      
# Configure item pipelines: register the two pipelines here
ITEM_PIPELINES = {
    'Grain.pipelines.GrainPipeline': 100,
    'Grain.Grainpipelines.DBPipeline': 300,
}
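# Lower numbers run first: GrainPipeline (100) writes the local HTML file before DBPipeline (300) inserts the row into MySQL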
      
# Optionally also write the log to a local file
LOG_FILE = "grain.log"
LOG_LEVEL = "DEBUG"
# Redirect print() output into the log as well
LOG_STDOUT = True

8. Make sure MySQL is running and the table has been created beforehand

# Create the table for the National Grain and Oil Information Center articles
CREATE TABLE grainoil(
    id INT PRIMARY KEY AUTO_INCREMENT NOT NULL,
    news_cate VARCHAR(6),
    news_title VARCHAR(100),
    news_date DATE,
    news_source VARCHAR(30),
    news_guide VARCHAR(150),
    news_content MEDIUMTEXT,
    news_url VARCHAR(90)
);

9. With everything configured, start the Spider with the crawl command:

scrapy crawl grain


Reposted from blog.csdn.net/z564359805/article/details/81561912