Python 3.6 + Scrapy: query the POS backend for orders in a given date range and status, and save them to an Excel spreadsheet

Given a user-supplied date range, this project fetches the order information for orders that are completed or in delivery, and writes it to a spreadsheet.
An order may contain several products, so duplicate order numbers will appear in the output; for those rows, the freight, discount amount, and coupon payment are the order totals averaged over the number of products in the order.
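As a concrete illustration of that averaging, here is a minimal sketch; the variable names and numbers are invented for the example, not taken from real orders:

# Minimal sketch of the per-product averaging described above (illustrative numbers only).
order_freight = 12.0    # total freight for the whole order
order_discount = 6.0    # total discount amount for the whole order
order_voucher = 3.0     # total coupon payment for the whole order
product_count = 3       # number of product lines in the order

# Each spreadsheet row for this order carries an equal share of the totals:
freight_per_line = order_freight / product_count      # 4.0
discount_per_line = order_discount / product_count    # 2.0
voucher_per_line = order_voucher / product_count      # 1.0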

1. Create a Scrapy project

scrapy startproject Order

2. Enter the project directory and create a spider with the genspider command

scrapy genspider order XXXX.com
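After these two commands the project should look roughly like this (the exact boilerplate files depend on your Scrapy version); the files edited in the following steps are items.py, spiders/order.py, pipelines.py and settings.py:

Order/
    scrapy.cfg
    Order/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            order.py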

3. Define the data to scrape (items.py)

# -*- coding: utf-8 -*-
import scrapy

class OrderItem(scrapy.Item):
    # Row number
    number = scrapy.Field()
    # Order number
    order_number = scrapy.Field()
    # Seller (company)
    order_company = scrapy.Field()
    # Customer information
    order_client = scrapy.Field()
    # Order amount
    order_money = scrapy.Field()
    # Time the order was placed
    order_time = scrapy.Field()
    # Order source
    order_source = scrapy.Field()
    # Order status
    order_state = scrapy.Field()
    # Sign-off (receipt) time
    sign_time = scrapy.Field()
    # Product name
    product_name = scrapy.Field()
    # Product category
    product_class = scrapy.Field()
    # Product amount subtotal
    product_money = scrapy.Field()
    # Average freight
    freight = scrapy.Field()
    # Discount amount
    discount_money = scrapy.Field()
    # Coupon payment
    voucher = scrapy.Field()
    # Query date range
    select_time = scrapy.Field()

4. Write the spider that extracts the item data (in the spiders folder: order.py)

# -*- coding: utf-8 -*-
# 1. Given a user-supplied date range, fetch orders that are completed or in delivery and write them to a spreadsheet.
# 2. An order may contain several products, so duplicate order numbers will appear; freight, discount amount,
#    and coupon payment are averaged over the number of products in the order.

import re
import time

import scrapy
from Order.items import OrderItem

class OrderSpider(scrapy.Spider):
    name = 'order'
    allowed_domains = ['XXXX.com']
    login_page = "https://pos.XXXX.com/login.html"
    start_urls = ['https://pos.XXXX.com/order/tolist.html']
    username = input("Please enter your username: ")
    password = input("Please enter your password: ")

    def start_requests(self):
        yield scrapy.Request(url=self.login_page,callback=self.login)

    def login(self,response):
        yield scrapy.FormRequest.from_response(
            response,
            formdata={"j_username":self.username, "j_password":self.password},
            callback = self.parse_page
        )
    def parse_page(self,response):
        if "loginerror" in response.body.decode("utf-8"):
            print("登录失败,错误的手机号或密码")
        if "</span>首页" in response.body.decode("utf-8"):
            print("欢迎您'%s',成功登录POS后台管理系统!"%(self.username))
            yield scrapy.Request(self.start_urls[0],callback=self.order_list)
    def order_list(self,response):
        # The user-entered dates: the first two are for completed orders, the last two for orders in delivery
        self.date_list = []
        # Regex for the expected date format
        patt = re.compile(r"\d{4}-\d{1,2}-\d{1,2}")
        # Order status codes: 100 = completed, 40 = in delivery
        for self.state in ['100', '40']:
            while True:
                print("\n" + "=" * 87)
                print("An empty start date defaults to the earliest 2015 date, assumed to be 2015-1-1; an empty end date defaults to today.")
                print("Note: for completed orders, leaving both dates empty also selects orders whose sign-off date is empty.")
                print("=" * 87 + "\n")
                if self.state == "100":
                    self.start_date = input("Start date for completed (signed-off) orders (format: 2018-11-30): ")
                    self.end_date = input("End date for completed (signed-off) orders (format: 2018-11-30): ")
                else:
                    self.start_date = input("Start order date for orders in delivery (format: 2018-11-30): ")
                    self.end_date = input("End order date for orders in delivery (format: 2018-11-30): ")
                re_start_date = patt.findall(self.start_date)
                re_end_date = patt.findall(self.end_date)
                if self.start_date != '':
                    if len(re_start_date) != 0:
                        # The end date may be left empty
                        if self.end_date == '':
                            break
                        elif len(re_end_date) == 0:
                            print("ERROR: the end date format is invalid, please try again!")
                            continue
                        else:
                            break
                    else:
                        if self.end_date != '' and len(re_end_date) == 0:
                            print("ERROR: both the start and end date formats are invalid, please try again!")
                        else:
                            print("ERROR: the start date format is invalid, please try again!")
                        continue
                else:
                    if self.end_date != '':
                        if len(re_end_date) == 0:
                            print("ERROR: the end date format is invalid, please try again!")
                            continue
                        else:
                            break
                    else:
                        break
            self.date_list.append(self.start_date)
            self.date_list.append(self.end_date)
            yield scrapy.FormRequest.from_response(
                response,
                formdata={
                    # "d-5481-p": "1",
                    "to.buyer": "",
                    "to.endDate": self.end_date,
                    "to.id": "",
                    # 订单状态,-1代表全部
                    "to.orderSource": "-1",
                    "to.seller": "",
                    "to.startDate": self.start_date,
                    # 已完成是100,配送中是40
                    "to.status": self.state,
                },
                callback=self.parse)
        # print("退出系统。。。") # 这个无法实现,未等产品获取完毕系统就已退出导致获取不到数据
        # yield scrapy.Request('https://pos.XXXX.com/j_spring_security_logout')
    def parse(self, response):
        # Today's date
        today_time = time.strftime("%Y-%m-%d", time.localtime())
        # Fill in the defaults for any dates the user left empty
        if self.date_list[1] == '':
            self.date_list[1] = today_time
        if self.date_list[3] == '':
            self.date_list[3] = today_time
        if self.date_list[0] == '':
            self.date_list[0] = '2015-1-1'
        if self.date_list[2] == '':
            self.date_list[2] = '2015-1-1'
        items = []
        next_url_list = list(set(response.xpath('//span[@class="pagelinks"]/a/@href').extract()))
        # Commented-out variant: read the total result count from the page and compute the page count
        # page_list = response.xpath('//*[@id="signForm"]/span[1]/text()').extract()
        # if len(page_list) == 0:
        #     print("Sorry, no orders matched the given conditions!")
        # else:
        #     # A single result is displayed as "One"
        #     if page_list[0].split(" ")[0] != "One":
        #         total = int(page_list[0].split(" ")[0].replace(",", ""))
        #     else:
        #         total = 1
        #     # 20 results per page
        #     if total % 20 == 0:
        #         end_page = total // 20
        #     else:
        #         end_page = total // 20 + 1
        #     print('Got %d orders, %d page(s) in total' % (total, end_page))
        order_state = ""
        for each in response.xpath('//div[@class="dataTables_wrapper"]'):
            # Row number
            number = each.xpath('//*[@id="to"]/tbody/tr/td[1]/text()').extract()
            # Order number
            order_number = each.xpath('//*[@id="to"]/tbody/tr/td[2]/a/text()').extract()
            # Seller (company)
            order_company = each.xpath('//*[@id="to"]/tbody/tr/td[3]/a/text()').extract()
            # Customer information
            order_client = each.xpath('//*[@id="to"]/tbody/tr/td[4]/text()').extract()
            # Order amount
            order_money = each.xpath('//*[@id="to"]/tbody/tr/td[5]/text()').extract()
            # Time the order was placed (no surrounding whitespace)
            order_time = each.xpath('//*[@id="to"]/tbody/tr/td[6]/text()').extract()
            # Order source
            order_source = each.xpath('//*[@id="to"]/tbody/tr/td[10]/text()').extract()
            # Order status
            order_state = each.xpath('//*[@id="to"]/tbody/tr/td[11]/text()').extract()
            # Sign-off time
            sign_time = each.xpath('//*[@id="to"]/tbody/tr/td[7]/text()').extract()
            for i in range(len(number)):
                item = OrderItem()
                item['number'] = number[i].strip()
                item['order_number'] = order_number[i].strip()
                item['order_company'] = order_company[i].replace("\n", "").replace("\t", "").replace("\r", "").\
                    replace(" ", "").replace(" ", "").strip()
                item['order_client'] = order_client[i].strip()
                item['order_money'] = order_money[i].strip()
                item['order_time'] = order_time[i].strip()
                item['order_source'] = order_source[i].strip()
                item['order_state'] = order_state[i].strip()
                # "已完成" (completed) must stay in Chinese: it matches the scraped page text
                if order_state[i].strip() == "已完成":
                    item['sign_time'] = sign_time[i].strip()
                    item['select_time'] = "Sign-off dates queried: " + self.date_list[0] + " to " + self.date_list[1]
                # Orders in delivery have no sign-off time
                else:
                    item['sign_time'] = ""
                    item['select_time'] = "Order dates queried: " + self.date_list[2] + " to " + self.date_list[3]
                items.append(item)
        # Use the order number to build the product-detail URL for each order
        for item in items:
            id_url = 'https://pos.XXXX.com/order/showto.html?to.id=' + item['order_number']
            yield scrapy.Request(url=id_url, meta={'meta_1': item}, callback=self.parse_id)
        # Handle pagination
        for page in next_url_list:
            if page[10:11] != '1':
                print("Fetching page %s, order status: %s" % (page[10:11], order_state[0].strip()))
                order_list_url = "https://pos.XXXX.com/order/tolist.html" + str(page)
                # Page 1 needs no handling: the response passed in from order_list already covers it
                yield scrapy.Request(url=order_list_url, callback=self.parse)
    # Use the order detail page to get product names, freight, prices, etc.
    def parse_id(self, response):
        meta_1 = response.meta['meta_1']
        # Debugging aid: dump the raw page to a file for inspection
        # with open(meta_1['order_number'] + ".html", "w", encoding='utf-8') as f:
        #     f.write(response.text)
        # Product names. Note the XPath: use //tr rather than /tbody/tr, because some <tbody>
        # tags are inserted by the browser and do not exist in the raw HTML
        product_name = response.xpath('//*[@id="order_items"]//tr/td[1]/text()').extract()
        # product_name = response.xpath('//*[@id="order_items"]/tbody/tr/td[1]/text()').extract()
        # Product category
        # product_class = response.xpath('//div[@id="tabs-2"]/table[@id="order_items"]/tbody/tr/td[2]/text()').extract()
        product_class = response.xpath('//*[@id="order_items"]//tr/td[2]/text()').extract()
        # Product amount subtotal
        product_money = response.xpath('//*[@id="order_items"]//tr/td[6]/text()').extract()
        # Freight, split evenly across product lines
        freight = response.xpath('//*[@id="order_items"]//tr[2]/th[2]/span/text()').extract()
        # Discount amount, split evenly
        discount_money = response.xpath('//*[@id="order_items"]//tr[3]/th[2]/text()').extract()
        # Coupon payment, split evenly
        voucher = response.xpath('//*[@id="order_items"]//tr[5]/th[2]/text()').extract()
        # Number of product lines in this order
        length = len(product_name)
        print("Status: %s, order: %s, %s product line(s), processing..." % (meta_1['order_state'], meta_1['order_number'], length))
        for i in range(length):
            # Create a fresh item for each product line; reusing one mutable item across yields can corrupt rows
            item = OrderItem()
            item['product_name'] = product_name[i].strip()
            item['product_class'] = product_class[i].strip()
            item['product_money'] = product_money[i].strip()
            item['freight'] = float(freight[0].strip()) / length
            item['discount_money'] = float(discount_money[0].strip()) / length
            item['voucher'] = float(voucher[0].strip()) / length
            item['number'] = meta_1['number']
            item['order_number'] = meta_1['order_number']
            item['order_company'] = meta_1['order_company']
            item['order_client'] = meta_1['order_client']
            item['order_money'] = meta_1['order_money']
            item['order_time'] = meta_1['order_time']
            item['order_source'] = meta_1['order_source']
            item['order_state'] = meta_1['order_state']
            item['sign_time'] = meta_1['sign_time']
            item['select_time'] = meta_1['select_time']
            yield item

5. Process the data in the pipeline and save the results to a file (pipelines.py)

# -*- coding: utf-8 -*-
import json
import time

from openpyxl import Workbook

# A json.JSONEncoder subclass that decodes bytes to str
# (not used by the Excel pipeline below; useful if you export JSON instead)
class MyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, bytes):
            return str(o, encoding='utf-8')
        return json.JSONEncoder.default(self, o)

class OrderPipeline(object):
    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        self.ws.title = 'Order list'
        # Freeze the header row
        self.ws.freeze_panes = 'A2'
        # Widen the columns that hold long values
        self.ws.column_dimensions['C'].width = 19
        self.ws.column_dimensions['D'].width = 21
        self.ws.column_dimensions['J'].width = 23
        self.ws.column_dimensions['N'].width = 9.4
        self.ws.column_dimensions['O'].width = 18
        self.ws.column_dimensions['P'].width = 35
        # Header row
        self.ws.append(['No.', 'Order number', 'Seller', 'Customer',
                        'Order amount', 'Order time',
                        'Order source', 'Order status', 'Product name',
                        'Product category', 'Product subtotal', 'Average freight',
                        'Discount amount', 'Coupon payment', 'Sign-off time', 'Query dates'])

    def process_item(self, item, spider):
        text = [item['number'], item['order_number'], item['order_company'], item['order_client'],
                item['order_money'], item['order_time'],
                item['order_source'], item['order_state'], item['product_name'],
                item['product_class'],item['product_money'],item['freight'],
                item['discount_money'],item['voucher'],item['sign_time'],item['select_time']]
        self.ws.append(text)
        return item

    def close_spider(self, spider):
        # Append today's date to the output file name
        file_end_name = time.strftime("%Y-%m-%d", time.localtime())
        self.wb.save("order_list_" + file_end_name + ".xlsx")
        print("All data written; done!")

6. Configure the settings file (settings.py)

# Obey robots.txt rules (disabled here); see https://blog.csdn.net/z564359805/article/details/80691677 for details
ROBOTSTXT_OBEY = False

# Override the default request headers: add a User-Agent
DEFAULT_REQUEST_HEADERS = {
  'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);',
  # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  # 'Accept-Language': 'en',
}

# Configure item pipelines: uncomment this block to enable the pipeline
ITEM_PIPELINES = {
    'Order.pipelines.OrderPipeline': 300,
}

# Optionally write the log to a local file
LOG_FILE = "order.log"
LOG_LEVEL = "DEBUG"
# Also redirect print output into the log
LOG_STDOUT = True

7. With everything configured, run the project's crawl command to start the spider:

scrapy crawl order
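After the crawl finishes, you can sanity-check the generated workbook with a short openpyxl snippet. This is only a verification sketch: the file name assumes the spider finished on the same day the check runs, matching close_spider in the pipeline above.

import time
from openpyxl import load_workbook

# Open the workbook written by OrderPipeline (name assumed: "order_list_" + today's date).
wb = load_workbook("order_list_" + time.strftime("%Y-%m-%d") + ".xlsx")
ws = wb.active
print("Rows written (including the header):", ws.max_row)
# Print the header row to confirm the column order.
print("Header:", [cell.value for cell in ws[1]])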
