Lianjia (链家) Second-Hand Housing Scraper

I've been looking into simulated login lately and picked Lianjia's second-hand housing listings as scraping practice. My skills are limited, so things like parallelization, complexity, and performance aren't really addressed; this is purely a learning exercise.
The source code is below. Like-minded readers are welcome to help improve it or offer suggestions.
# -*- coding: utf-8 -*-

import re

import pandas
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


ua = UserAgent()                       # random User-Agent so requests look like a normal browser
headers1 = {'User-Agent': ua.random}   # note: ua.random must not be quoted, or the literal string is sent
houseary = []                          # list of dicts, one per listing

for j in range(1, 101):                # listing pages to crawl
    res = requests.get('http://sh.lianjia.com/ershoufang/d/' + str(j), headers=headers1)
    soup = BeautifulSoup(res.text, 'html.parser')   # parse each listing page once, not once per item
    for i in range(0, 30):             # 30 listings per page
        info = {}
        # every 4th <a> under class="clear" is the link to the i-th listing's detail page
        url1 = soup.select('.clear a')[4 * i]['href']
        q = requests.get(url1, headers=headers1)
        detail = BeautifulSoup(q.text, 'html.parser')

        s = str(detail.select('.houseInfo')[0])
        info['房子格局'] = ''.join(re.findall('<div.*?mainInfo">(.*?)</div>', s))   # layout
        info['楼层信息'] = ''.join(re.findall('<div.*?subInfo">(.*?)</div>', s))    # floor info

        s = str(detail.select('.houseInfo div')[4])
        info['朝向'] = ''.join(re.findall('<div.*?mainInfo".*?>(.*?)</div>', s))    # orientation
        s = str(detail.select('.houseInfo div')[5])
        info['装修情况'] = ''.join(re.findall('<div.*?subInfo">(.*?)</div>', s))    # renovation
        s = str(detail.select('.houseInfo div')[7])
        info['房子面积'] = ''.join(re.findall('<div.*?mainInfo">(.*?)</div>', s))   # floor area

        s = str(detail.select('.price')[0])
        info['房子总价(单位/万)'] = ''.join(re.findall('<span.*?total">(.*?)</span>', s))    # total price, 10k CNY
        info['单价(元/平米)'] = ''.join(re.findall('<span.*?unitPriceValue">(.*?)<i>', s))   # unit price, CNY per m2

        s = str(detail.select('.houseInfo div')[8])
        info['建楼年份'] = ''.join(re.findall('<div.*?subInfo">(.*?)</div>', s))    # year built
        s = str(detail.select('.aroundInfo div')[0])
        info['小区名称'] = ''.join(re.findall('<a.*?_blank">(.*?)</a>', s))         # community name
        s = str(detail.select('.aroundInfo div')[1])
        info['所在区域'] = ''.join(re.findall('<a.*?>(.*?)</a>', s))                # district

        # print(info)
        houseary.append(info)

data = pandas.DataFrame(houseary)
data.to_excel('链家_二手房.xlsx')
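
On the parallelization mentioned at the top: below is a minimal sketch of how the 100 listing-page fetches could run concurrently with a thread pool. The helper name fetch_page and the worker count are illustrative assumptions, not part of the original script; the per-listing parsing would stay exactly as above.

# A minimal sketch (assumption, not part of the original script): fetch listing pages concurrently.
from concurrent.futures import ThreadPoolExecutor

import requests
from fake_useragent import UserAgent

ua = UserAgent()

def fetch_page(page):                   # fetch_page is an illustrative helper name
    """Download one listing page and return its HTML text."""
    url = 'http://sh.lianjia.com/ershoufang/d/' + str(page)
    res = requests.get(url, headers={'User-Agent': ua.random}, timeout=10)
    return res.text

with ThreadPoolExecutor(max_workers=5) as pool:   # keep the worker count small to stay polite to the site
    pages_html = list(pool.map(fetch_page, range(1, 101)))
# each entry of pages_html can then be parsed with BeautifulSoup exactly as in the script above

A thread pool fits here because the work is network-bound; the per-listing detail requests could be handled the same way.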







Reposted from blog.csdn.net/eason_oracle/article/details/79993457