爬虫之汽车之家(BeautifulSoup)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Scrape travel-journal photos from the Autohome (Weifang) city page.

Fetches the landing page, follows every article link in the
"people-content" list, and downloads each image found in the article's
"journey-item-list" section into a local ``img/`` directory.
"""
# requests: downloads page source, equivalent of urlopen()
# BeautifulSoup: parses HTML, replacing the regex (re) approach
import os

import requests
import bs4
from bs4 import BeautifulSoup

MAIN_URL = "https://www.autohome.com.cn/weifang/"


def main():
    """Download all journey images linked from the Autohome main page."""
    # Create the output directory up front; without this the script
    # crashes with FileNotFoundError on the first open() if ``img/``
    # does not already exist.
    os.makedirs("img", exist_ok=True)

    # One Session reuses TCP connections across the many requests below.
    session = requests.Session()

    # Fetch the main page source and hand it to bs4 for parsing.
    main_page_content = session.get(MAIN_URL).text
    main_page = BeautifulSoup(main_page_content, "html.parser")

    # Locate the article-link list on the landing page.
    main_div = main_page.find(name="div", attrs={"class": "people-content"})
    main_ul = main_div.find(name="ul", attrs={"class": "list-text"})
    main_a_lst = main_ul.find_all("a")  # list of <a> tags, one per article

    n = 1  # running counter used in the saved file names
    for a in main_a_lst:
        # Article hrefs are protocol-relative, so prepend the scheme.
        one_page_url = "https:" + a.get("href")
        print("====>", one_page_url)

        # The article markup contains stray ``</br>`` tags that confuse
        # the parser; strip them all before parsing.
        one_page_content = session.get(one_page_url).text.replace("</br>", "")
        one_page = BeautifulSoup(one_page_content, "html.parser")

        item_list = one_page.find("div", attrs={"class": "journey-item-list"})
        if item_list is None:
            # Layout changed or this article has no photo section;
            # the original would raise AttributeError here.
            continue

        for img in item_list.find_all("img"):
            # Lazily-loaded images keep the real URL in ``data-original``;
            # fall back to ``src`` otherwise.
            download_url = img.get("data-original") or img.get("src")
            if not download_url:
                continue  # no usable URL on this <img>; skip it
            print(download_url)
            # ``with`` guarantees the file handle is closed even if the
            # download request raises.
            with open("img/汽车之家图片%s.jpg" % n, mode="wb") as f:
                f.write(session.get(download_url).content)
            n = n + 1
            print("你成功的从汽车之家偷走了一张图片")


if __name__ == "__main__":
    main()

猜你喜欢

转载自www.cnblogs.com/tengteng0520/p/11275530.html