北邮 python 爬虫爬取链家的新房数据进行数据处理

时间:2021-2-20 作者:admin

博主声明:用途仅供学习


items.py


import scrapy


class MyItem(scrapy.Item):
    # Item holding one new-home (loupan) listing scraped from Lianjia.
    # NOTE(review): "aera" is a typo for "area" and "UnitPrice" breaks the
    # lowercase naming convention, but these exact keys are used by the
    # spider and the CSV post-processing, so renaming would break callers.
    name = scrapy.Field()    # listing name
    place1 = scrapy.Field()   # location component 1
    place2 = scrapy.Field()   # location component 2
    place3 = scrapy.Field()   # location component 3
    model = scrapy.Field()   # smallest floor plan / layout (may be '')
    aera = scrapy.Field()   # minimum area (typo of "area", kept for compatibility)
    totalprice = scrapy.Field()   # total price
    UnitPrice = scrapy.Field()    # price per unit area
    unit = scrapy.Field()    # price-unit label text

spider.py

import scrapy
from linajia.items import MyItem  # 从items.py中引入MyItem对象

class mySpider(scrapy.spiders.Spider):
    """Scrape Beijing new-home (loupan) listings from Lianjia.

    Crawls the paginated listing index and yields one MyItem per entry
    with name, location, smallest layout, minimum area and price fields.
    """
    name = "linajia"  # spider name (kept as-is; the CLI references it)
    # Bug fix: allowed_domains entries must be bare domains; the original
    # trailing "/" would make the offsite middleware filter every request.
    allowed_domains = ["bj.lianjia.com"]
    start_urls = ["https://bj.fang.lianjia.com/loupan/"]
    # Also crawl index pages 2..19.
    for pg in range(2, 20):
        start_urls.append("https://bj.fang.lianjia.com/loupan/pg{}/".format(pg))
    # Throttle requests so pages come back slowly and keep their order.
    download_delay = 1

    def parse(self, response):
        """Parse one listing index page and yield a MyItem per listing."""
        for each in response.xpath('/html/body/div[4]/ul[2]/li'):
            # Bug fix: build a fresh item per listing instead of mutating
            # and re-yielding a single shared instance.
            item = MyItem()
            try:
                item['name'] = each.xpath("div/div[1]/a/text()").extract()[0]
                item['place1'] = each.xpath("div/div[2]/span[1]/text()").extract()[0]
                item['place2'] = each.xpath("div/div[2]/span[2]/text()").extract()[0]
                item['place3'] = each.xpath("div/div[2]/a/text()").extract()[0]
                # Smallest floor plan; may be absent on some listings.
                models = each.xpath("div/a/span[1]/text()").extract()
                item['model'] = models[0] if models else ''
                # Minimum area: take the substring between the first space
                # and the "-" (or "m" when no range is shown).
                areas = each.xpath("div/div[3]/span/text()").extract()
                if areas:
                    # Renamed local (was "str", shadowing the builtin).
                    text = areas[0]
                    startpos = text.find(" ") + 1
                    endpos = text.find("-")
                    if endpos == -1:
                        endpos = text.find("m")
                    item['aera'] = text[startpos:endpos]
                else:
                    item['aera'] = ''
                totals = each.xpath("div/div[6]/div[2]/text()").extract()
                unit_prices = each.xpath("div/div[6]/div[1]/span[1]/text()").extract()
                unit = each.xpath("div/div[6]/div/span[2]/text()").extract()
                # Some listings show the total price in the slot normally
                # holding the average price; detect this via the unit label.
                # Bug fix: guard the possibly-empty unit / price lists.
                if unit and unit[0].find("总价") != -1:
                    # Move the misplaced total into the totalprice field.
                    item['totalprice'] = unit_prices[0] if unit_prices else ''
                    item['UnitPrice'] = ''
                else:
                    item['UnitPrice'] = unit_prices[0] if unit_prices else ''
                    item['totalprice'] = totals[0] if totals else ''
                yield item
            except (ValueError, IndexError):
                # Bug fix: extract()[0] on a missing node raises IndexError,
                # which the original "except ValueError" never caught, so a
                # single sparse listing crashed the whole page parse.
                pass

DataProcess.py

import numpy as np
import pandas as pd

# Open the scraped CSV (GBK-encoded); read every column as string first.
fileNameStr = 'MyData.csv'
orig_df = pd.read_csv(fileNameStr, encoding='gbk', dtype=str)

# 1. Trim surrounding whitespace on all string columns.
for col in ['name', 'place1', 'place2', 'place3', 'model',
            'aera', 'totalprice', 'UnitPrice']:
    orig_df[col] = orig_df[col].str.strip()

# 2. Area to integer (missing -> 0 placeholder, restored to '' later).
#    Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
#    use the builtin int instead.
orig_df['aera'] = orig_df['aera'].fillna(0).astype(int)

# 3. Unit price to integer (same np.int fix).
orig_df['UnitPrice'] = orig_df['UnitPrice'].fillna(0).astype(int)

# 4. Total price: strip the "总价" / "万/套" decorations, then to integer.
orig_df['totalprice'] = orig_df['totalprice'].str.replace("总价", "")
orig_df['totalprice'] = orig_df['totalprice'].str.replace("万/套", "")
orig_df['totalprice'] = orig_df['totalprice'].fillna(0).astype(int)

# 5. Derive whichever of total/unit price is missing from the other.
#    total (万) = area (m²) * unit price (元/m²) / 10000
for idx, row in orig_df.iterrows():
    if orig_df.loc[idx, 'totalprice'] == 0:
        orig_df.loc[idx, 'totalprice'] = (orig_df.loc[idx, 'aera'] * orig_df.loc[idx, 'UnitPrice']) // 10000
    if orig_df.loc[idx, 'UnitPrice'] != 0:
        # Reformat unit price to 万 with 4 decimals.
        orig_df.loc[idx, 'UnitPrice'] = '%.4f' % (orig_df.loc[idx, 'UnitPrice'] / 10000)
    elif orig_df.loc[idx, 'aera'] != 0:
        # Bug fix: guard against ZeroDivisionError when the area placeholder
        # is 0 as well; such rows keep the 0 unit price.
        orig_df.loc[idx, 'UnitPrice'] = '%.4f' % (orig_df.loc[idx, 'totalprice'] / orig_df.loc[idx, 'aera'])

# 6. Restore blank area for rows where it was missing (placeholder 0).
#    Bug fix: np.str was removed alongside np.int; use builtin str.
orig_df['aera'] = orig_df['aera'].astype(str)
orig_df.loc[orig_df['aera'] == '0', 'aera'] = ''

# 7. Total price statistics.
print("总价:")
imaxpos = orig_df['totalprice'].idxmax()
print("最贵房屋", orig_df.loc[imaxpos, "totalprice"], orig_df.loc[imaxpos, "name"])
iminpos = orig_df['totalprice'].idxmin()
print("最便宜房屋", orig_df.loc[iminpos, "totalprice"], orig_df.loc[iminpos, "name"])
print("中位数", orig_df['totalprice'].median())

# 8. Unit price statistics. The column holds '%.4f'-formatted strings, so
#    compare/aggregate on a float view (hoisted once instead of per call).
print("单价:")
unit_as_float = orig_df['UnitPrice'].astype(float)
idmaxpos = unit_as_float.idxmax()
print("最贵房屋", orig_df.loc[idmaxpos, "UnitPrice"], orig_df.loc[idmaxpos, "name"])
idminpos = unit_as_float.idxmin()
print("最便宜房屋", orig_df.loc[idminpos, "UnitPrice"], orig_df.loc[idminpos, "name"])
# Bug fix: median() on the string column raises TypeError in modern
# pandas; compute it on the float view like the max/min above.
print("中位数", unit_as_float.median())

# Write the cleaned data back out (GBK to match the input encoding).
orig_df.to_csv("NewMydata.csv", header=True, encoding="gbk", mode='w+', index=False)

处理结果
北邮 python 爬虫爬取链家的新房数据进行数据处理

声明:本文内容由互联网用户自发贡献自行上传,本网站不拥有所有权,未作人工编辑处理,也不承担相关法律责任。如果您发现有涉嫌版权的内容,欢迎进行举报,并提供相关证据,工作人员会在5个工作日内联系你,一经查实,本站将立刻删除涉嫌侵权内容。