Task2——数据分析

1. 赛题理解

此次数据挖掘的目标为二手车交易价格预测,该问题本质上是一个回归问题。
(1)数据集介绍
数据总量超过370000条,随机抽取其中15万条作为训练数据集,5万条作为测试集A,5万条作为测试集B。数据共包含31列变量信息(其中15列v_0~v_14为匿名特征)。
(2)评估指标
对于分类和回归的问题应当采用不同的评估指标。
分类:

  • 二分类:accuracy, precision, recall, F-score, pr曲线,AUC/ROC
  • 多分类:accuracy, 宏平均, 微平均, F-score

回归:
MAE、MSE、MAPE、RMSE、$R^2$

(3)包导入介绍

#  Step 1:数据读取
import pandas as pd  #用于读取数据
# Step 1: load the competition data (space-delimited CSV files, first row = header).
train_data = pd.read_csv('used_car_train_20200313.csv', delimiter=' ', header=0)
test_data = pd.read_csv('used_car_testA_20200313.csv', delimiter=' ', header=0)
# Quick first look at the data.
print('train data shape:', train_data.shape)  # expected (150000, 31)
print(train_data.head())      # first 5 rows
print(train_data.info())      # per-column dtype and non-null counts
print(train_data.columns)     # column names
print(train_data.describe())  # count / mean / std / min / 25% / 50% / 75% / max per column
# BUG FIX: the original called `train_data.tail` WITHOUT parentheses, which appends the
# bound method object instead of the last rows. Also, DataFrame.append was deprecated in
# pandas 1.4 and removed in 2.0 — use pd.concat instead. Shows head and tail together.
print(pd.concat([train_data.head(), train_data.tail()]))

# Step 2:评价指标的导入
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score    # 分类问题
from sklearn.metrics import mean_absolute_error,mean_squared_error, r2_score  #回归问题
# MAPE和RMSE需要自己实现
# MAPE and RMSE are not provided by sklearn directly, so implement them here.
# BUG FIX: the original used `np` before `import numpy as np`, and executed a
# module-level print of RMSE on the undefined names y_true / y_pred (NameError).
# Both metrics are now proper functions; `from sklearn import metrics` is no longer
# needed because RMSE is computed with numpy (mean_squared_error stays imported above).
import numpy as np


def mape(y_true, y_pred):
    """Mean absolute percentage error, as a fraction (multiply by 100 for percent).

    Note: divides by y_true, so any zero in y_true yields inf/nan in the mean.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.mean(np.abs((y_pred - y_true) / y_true))


# RMSE
def rmse(y_true, y_pred):
    """Root mean squared error: sqrt of the mean squared residual."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.sqrt(np.mean((y_pred - y_true) ** 2))

2. EDA-数据探索性分析

数据探索性分析的目的在于初步了解和熟悉数据集,了解变量间的相互关系以及变量与预测值之间的关系,是进行特征工程前的重要步骤。主要内容包括:

  • 判断是否存在缺失和异常;若存在,如何填补缺失?如何剔除异常?
  • 了解预测值的分布;包括总体分布情况,偏态、峰态、具体频数等
  • 数字特征分析;包括数字特征间的相关性分析、特征值的偏度和峰值、特征分布的可视化、多变量互相回归关系可视化(主要确定是否存在正态分布,若不服从正态分布要考虑如何正态化)
  • 类型特征分析;包括unique分布、箱型图、小提琴图、柱形图、类别频数可视化等;
  • pandas_profiling生成数据报告

代码示例:

#  导入绘图包
import matplotlib.pyplot as plt
import seaborn as sns
# 1. Missing-value / anomaly check
print(train_data.isnull().sum())  # NaN count for every column
# Visualise only the columns that actually contain NaNs, smallest count first
nan_counts = train_data.isnull().sum()
nan_counts = nan_counts[nan_counts > 0]
nan_counts = nan_counts.sort_values()
nan_counts.plot.bar()  # bar chart

若存在的nan个数很少,在使用传统机器学习模型时可选择填充;使用lgb/xgb等树模型时可将空缺交由树模型自身优化;在nan不影响分布的情况下也可考虑直接删除。

#  2. 将缺省值可视化
import missingno as msno
msno.matrix(train_data.sample(250))   # missing-data matrix on a 250-row sample
msno.bar(train_data.sample(1000))     # per-column non-null bar chart on a 1000-row sample
# 3. Anomaly detection: 'notRepairedDamage' uses '-' as its missing-value marker
train_data['notRepairedDamage'].value_counts()  # frequencies before cleaning
# FIX: `column.replace(..., inplace=True)` on a selected column may act on a temporary
# copy (SettingWithCopyWarning) and is deprecated behaviour; assign the result back.
train_data['notRepairedDamage'] = train_data['notRepairedDamage'].replace('-', np.nan)
train_data['notRepairedDamage'].value_counts()  # frequencies after cleaning
# 4. Inspect the heavily-skewed features, then drop them
# (seller / offerType are almost single-valued, so they carry no information)
train_data["seller"].value_counts()
train_data["offerType"].value_counts()
train_data.drop(columns=["seller", "offerType"], inplace=True)
# 5. Distribution of the prediction target
train_data['price'].value_counts()
# 6.总体分布情况(无界约翰逊分布)
import scipy.stats as st
# Fit three candidate distributions to the target and plot each in its own figure.
y = train_data['price']
candidate_fits = [
    ('Johnson SU', st.johnsonsu),
    ('Normal', st.norm),
    ('Log Normal', st.lognorm),
]
for fig_id, (fig_title, dist) in enumerate(candidate_fits, start=1):
    plt.figure(fig_id)
    plt.title(fig_title)
    sns.distplot(y, kde=False, fit=dist)
# 若预测值不服从正态分布,在回归前必须进行转换,原因在于正态分布能保证数据独立分布,加快模型处理效率。一般采用无界约翰逊分布对其进行转换
# 7. Skewness and kurtosis
price = train_data['price']
sns.distplot(price)
print("Skewness: %f" % price.skew())
print("Kurtosis: %f" % price.kurt())
train_data.skew()
train_data.kurt()
sns.distplot(train_data.skew(), color='blue', axlabel='Skewness')
sns.distplot(train_data.kurt(), color='orange', axlabel='Kurtness')
# 8. Raw frequency histogram of the target
plt.hist(price, orientation='vertical', histtype='bar', color='red')
plt.show()
# 9. After a log transform the distribution is much more even — log-transforming the
# target before regression is a common trick for prediction problems.
plt.hist(np.log(price), orientation='vertical', histtype='bar', color='red')
plt.show()
# 10. Split off the label (the prediction target) and group columns by dtype.
Y_train = train_data['price']
# BUG FIX: the original referenced the undefined name `Train_data` (NameError) and the
# deprecated alias np.object (removed in NumPy 1.24). Keep lists of column NAMES rather
# than sub-DataFrames, so they can be used to index train_data later on.
numeric_features = train_data.select_dtypes(include=[np.number]).columns.tolist()
categorical_features = train_data.select_dtypes(include=['object']).columns.tolist()
 # 也可直接人为给定
 # 11. 特征nunique分布
for cat_fea in categorical_features:
    print(cat_fea + "的特征分布如下:")
    # FIX: original message read "{}特征有个{}不同的值" — the measure word was
    # misplaced; corrected to "有{}个".
    print("{}特征有{}个不同的值".format(cat_fea, train_data[cat_fea].nunique()))
    print(train_data[cat_fea].value_counts())
#  数字特征分析
#  12.相关性分析
# BUG FIX: in the original, `numeric_features` is the DataFrame returned by
# select_dtypes, and `train_data[<DataFrame>]` raises. Select the numeric columns
# directly instead — this works regardless of how numeric_features was built.
price_numeric = train_data.select_dtypes(include=[np.number])
correlation = price_numeric.corr()
# Correlation of every numeric feature with the target, strongest first.
print(correlation['price'].sort_values(ascending=False), '\n')
f, ax = plt.subplots(figsize=(7, 7))
plt.title('Correlation of Numeric Features with Price', y=1, size=16)
sns.heatmap(correlation, square=True, vmax=0.8)
# 13. 查看几个特征得 偏度和峰值
# BUG FIX: `Train_data` is never defined anywhere in this script (NameError);
# the loaded frame is `train_data`.
for col in numeric_features:
    print('{:15}'.format(col),
          'Skewness: {:05.2f}'.format(train_data[col].skew()), ' ',
          'Kurtosis: {:06.2f}'.format(train_data[col].kurt()))
# 14.每个数字特征的分布可视化
# BUG FIX: `Train_data` -> `train_data` (Train_data is undefined). Also coerce
# numeric_features to a plain list of column names so pd.melt accepts it whether
# it is a list or the DataFrame produced by select_dtypes.
f = pd.melt(train_data, value_vars=list(numeric_features))
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
# 15.数字特征相互之间的关系可视化
sns.set()
columns = ['price', 'v_12', 'v_8', 'v_0', 'power', 'v_5', 'v_2', 'v_6', 'v_1', 'v_14']
# BUG FIX: `Train_data` -> `train_data`. NOTE(review): `size=` was renamed `height=`
# in seaborn >= 0.9 — kept as `size` to match the old seaborn this tutorial targets.
sns.pairplot(train_data[columns], size=2, kind='scatter', diag_kind='kde')
plt.show()
# 16.多变量互相回归关系可视化
# 16. Pairwise regression of price against selected features, on a 5x2 grid.
# BUG FIX: the original `plt.subplots(` call was left unterminated (SyntaxError —
# no figsize argument and no closing parenthesis) and referenced the undefined name
# `Train_data`. The ten copy-pasted regplot sections are also collapsed into one loop.
fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(24, 20))
plot_features = ['v_12', 'v_8', 'v_0', 'power', 'v_5', 'v_2', 'v_6', 'v_1', 'v_14', 'v_13']
for feature, ax in zip(plot_features, axes.ravel()):
    # scatter plus fitted regression line of price against this feature
    scatter_df = pd.concat([Y_train, train_data[feature]], axis=1)
    sns.regplot(x=feature, y='price', data=scatter_df, scatter=True, fit_reg=True, ax=ax)
# 类别特征分析
# 17.  unique分布
# 17. Number of distinct values per categorical feature
for fea in categorical_features:
    print(train_data[fea].nunique())
# 18. Box-plot visualisation of the categorical features
categorical_features = ['model', 'brand', 'bodyType', 'fuelType', 'gearbox', 'notRepairedDamage']
for col in categorical_features:
    train_data[col] = train_data[col].astype('category')
    if train_data[col].isnull().any():
        train_data[col] = train_data[col].cat.add_categories(['MISSING'])
        train_data[col] = train_data[col].fillna('MISSING')


def boxplot(x, y, **kwargs):
    """Box plot of y grouped by x, with x tick labels rotated for readability."""
    sns.boxplot(x=x, y=y)
    plt.xticks(rotation=90)


f = pd.melt(train_data, id_vars=['price'], value_vars=categorical_features)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(boxplot, "value", "price")
# 19. 类别特征的小提琴图可视化
catg_list = categorical_features
target = 'price'
# BUG FIX: `Train_data` -> `train_data` (Train_data is never defined in this script).
for catg in catg_list:
    sns.violinplot(x=catg, y=target, data=train_data)
    plt.show()
# 20.类别特征的柱形图可视化
def bar_plot(x, y, **kwargs):
    """Bar plot of y grouped by x, with x tick labels rotated for readability."""
    sns.barplot(x=x, y=y)
    plt.xticks(rotation=90)


# BUG FIX: `Train_data` -> `train_data` in both pd.melt calls below (NameError).
f = pd.melt(train_data, id_vars=['price'], value_vars=categorical_features)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(bar_plot, "value", "price")


# 21. Frequency of each category (count plot)
def count_plot(x, **kwargs):
    """Count plot of x, with x tick labels rotated for readability."""
    sns.countplot(x=x)
    plt.xticks(rotation=90)


f = pd.melt(train_data, value_vars=categorical_features)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(count_plot, "value")
# 22. 用pandas_profiling生成数据报告
# Generate a full HTML data-profiling report for the training set.
from pandas_profiling import ProfileReport

report = ProfileReport(train_data)
report.to_file("./example.html")

总结

  • 数据集的缺失处理应看缺失值的占比情况,一般来说缺失占比超过30%时需要进行填充,填充方式需要根据数据特征进行分析(均值填充、0填充、众数填充)
  • 异常值需单独分析,特征异常的label是否异常,异常值剔除还是用正常值填充,异常为记录异常还是机器本身的异常。
  • 预测值需要单独分析
  • 不同特征的分布情况如何,包括数值特征和类型特征。
发布了19 篇原创文章 · 获赞 17 · 访问量 1461

猜你喜欢

转载自blog.csdn.net/weixin_43839651/article/details/105079884