A Simple Scorecard Model Implementation

import numpy as np
import pandas as pd
from function import *  # local helper module; its Lagrange and Variance helpers are sketched further down
# Load the data, shape = (2500, 627)
data = pd.read_csv("train2500.csv", delimiter=",")
# data = data.iloc[:10000, :]
print(data.shape)
# Drop duplicate rows
data.drop_duplicates(inplace=True)
data["target"] = data["bad_good"]
del data["bad_good"]
# Step 1: rough data cleaning
# Per the field descriptions, drop some useless fields first: customer ID,
# account-opening branch, and ID-document type (the first three columns).
# Drop them in one call: deleting data.columns[0], [1], [2] one after the
# other would skip columns, because the indices shift after each deletion.
data.drop(data.columns[[0, 1, 2]], axis=1, inplace=True)
# Drop every column where a single value accounts for more than 90% of rows
del_index = []
for i in range(len(data.columns[:-1])):
    if max(data[data.columns[i]].value_counts(normalize=True)) > 0.9:
        del_index.append(i)
data.drop(data.columns[del_index], axis=1, inplace=True)
print(data.shape)
# After this rough pass, the remaining shape is (2500, 221)
# Step 2: split the features into numerical and categorical columns
# Rule: more than 10 distinct values means continuous (numerical), otherwise categorical
numerical_feature = []    # numerical features: 211 columns
categorical_feature = []  # categorical features: 10 columns
# Assign every column to one of the two lists
for col in data.columns:
    if len(data[col].drop_duplicates()) > 10:
        numerical_feature.append(col)
    else:
        categorical_feature.append(col)
categorical_feature.remove("target")
"""
处理缺失值:
如果缺失超过80%, 构建衍生特征(缺失和非缺失做为两个类别),并删除原特征
如果缺失在50%和80%之间, 对数据进行分箱处理,并将有缺失的样本作为单独的类别标记
如果缺失在30%到50%之间,使用随机选特征进行模型填补,数值型使用回归填补,类别型使用分类填补,
"""
# Find the columns that contain missing values
notNullCol = []  # columns without missing values: 221
isNullCol = []   # columns with missing values: 0
for col in data.columns[:-1]:
    if sum(data[col].isnull()) > 0:  # a positive sum means the column has missing values
        isNullCol.append(col)
    else:
        notNullCol.append(col)
print(len(notNullCol))
print(len(isNullCol))
# Missing rate of a column
def MissingRate(df, col):
    return df[col].isnull().sum() / df.shape[0]

# Missing rate above the top threshold: derive a binary feature (1 = missing, 0 = present)
def MissingNewFeature(df, col):
    return df[col].isnull().astype(int)
# Missing rate between 50% and 80%: qcut-bin the column and mark the missing
# samples as their own category "miss"; returns the modified df
def CategoricalMissingFeature(df, col, n=4):
    notnull = df[df[col].notnull()][col]
    # duplicates="drop" keeps qcut from failing on repeated bin edges
    df.loc[df[col].notnull(), col] = pd.qcut(notnull, n, duplicates="drop")
    df.loc[df[col].isnull(), col] = "miss"
    return df
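# Illustrative example (hypothetical values, not from the original post):
# for a column holding 1..8 plus NaNs and n=4, the non-null rows become
# interval labels such as (0.999, 2.75] and every missing row becomes "miss".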
# Missing rate below 50%: model-based filling -- Lagrange interpolation for
# numerical features, a random-forest classifier for categorical features
def Model_fillna(df, col, feature, method):
    """
    :param df: DataFrame to fill
    :param col: column with missing values
    :param feature: a few randomly chosen complete columns used as predictors
    :param method: "clf" for categorical columns, "reg" for numerical columns
    :return: df with col filled
    """
    train_x = df[df[col].notnull()][feature]
    test_x = df[df[col].isnull()][feature]
    train_y = df[df[col].notnull()][col]
    if method == "clf":
        # Classification filling with a random forest
        from sklearn.ensemble import RandomForestClassifier
        clf = RandomForestClassifier()
        clf.fit(train_x, train_y)
        df.loc[df[col].isnull(), col] = clf.predict(test_x)
    elif method == "reg":
        # Lagrange interpolation (helper from function.py)
        df[col] = Lagrange(df, col)
    return df
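# The post imports Lagrange from a local function.py that is not included.
# Below is a minimal sketch of what that helper might look like, following
# the common scipy.interpolate.lagrange filling recipe; the window size k
# and all names are assumptions, not the author's actual code.
from scipy.interpolate import lagrange

def Lagrange(df, col, k=5):
    # Fill each missing value by fitting a Lagrange polynomial on up to k
    # non-null neighbours on each side and evaluating it at the gap
    # (assumes each gap has some non-null neighbours inside the window).
    y = df[col].reset_index(drop=True)
    for i in y[y.isnull()].index:
        idx = list(range(max(0, i - k), i)) + list(range(i + 1, min(len(y), i + 1 + k)))
        window = y[idx]
        window = window[window.notnull()]
        y[i] = lagrange(window.index, list(window))(i)
    return y.values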
# Run the missing-value handling
for col in isNullCol:
    if MissingRate(data, col) > 0.8:
        data[col] = MissingNewFeature(data, col)
    elif 0.5 < MissingRate(data, col) <= 0.8:
        data = CategoricalMissingFeature(data, col)
    elif MissingRate(data, col) < 0.5:
        # randomly pick 3 complete columns as predictors
        # (np.random.sample cannot sample from a list; np.random.choice can)
        feature = list(np.random.choice(notNullCol, 3, replace=False))
        if col in numerical_feature:
            data = Model_fillna(data, col, feature, method="reg")
        else:
            data = Model_fillna(data, col, feature, method="clf")
# The handling above can change a column's type, so re-derive the two feature lists
numerical_feature = []    # numerical features
categorical_feature = []  # categorical features
for col in data.columns:
    if len(data[col].drop_duplicates()) > 10:
        numerical_feature.append(col)
    else:
        categorical_feature.append(col)
categorical_feature.remove("target")
"""
截至到此处,所有数据粗略预处理完毕。下面开始进行特征选择
"""
# Numerical features
# First z-score-normalize every numerical column
print(data.shape)
from sklearn.preprocessing import StandardScaler
for col in numerical_feature:
    data[col] = StandardScaler().fit_transform(np.array(data[col]).reshape(-1, 1)).ravel()
    # Analysis of variance (helper from function.py); 154 features remain afterwards
    data = Variance(data, col)
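# Variance also lives in the missing function.py. Given the "analysis of
# variance" comment above, a plausible sketch (the signature, the one-way
# F-test, and the alpha threshold are all guesses) drops the column when
# it does not separate the good samples from the bad ones:
from scipy.stats import f_oneway

def Variance(df, col, alpha=0.05):
    good = df.loc[df["target"] == 0, col]
    bad = df.loc[df["target"] == 1, col]
    if f_oneway(good, bad).pvalue > alpha:
        del df[col]
    return df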
# Categorical features: compute each feature's WOE and IV and filter by IV,
# where IV = sum_i (bad%_i - good%_i) * ln(bad%_i / good%_i)
for col in list(categorical_feature):  # iterate over a copy: the list is edited inside the loop
    bad_total = sum(data["target"])                     # total number of bad samples
    bad = data.groupby([col])["target"].sum()           # bad samples per category
    bad_percent = np.array(bad) * 1.0 / np.array(bad_total)
    good_total = data.shape[0] - bad_total              # total number of good samples
    good = data.groupby([col])["target"].count() - bad  # good samples per category
    good_percent = np.array(good) * 1.0 / np.array(good_total)
    diff = bad_percent - good_percent
    percent = bad_percent / good_percent
    IV = 0
    for i in range(len(percent)):
        if percent[i] != 0:
            IV += diff[i] * np.log(percent[i])
    if IV < 0.2:
        del data[col]
        categorical_feature.remove(col)
# Bad-rate-encode the remaining categorical features: replace each category
# with the fraction of bad samples inside that category
for col in categorical_feature:
    bad = data.groupby([col])["target"].sum()      # bad samples per category
    total = data.groupby([col])["target"].count()  # all samples per category
    dicts = dict(zip(bad.index, np.array(bad) * 1.0 / np.array(total)))
    data[col + "_badEncoding"] = data[col].map(dicts)
    del data[col]
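# Illustrative example (hypothetical numbers, not from the original post):
# if category "A" holds 100 rows of which 30 are bad, every "A" row gets
# the value 0.30 in the new col + "_badEncoding" column.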
# Recursive feature elimination (RFE) down to 80 features
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeClassifier
estimator = DecisionTreeClassifier()
rfe = RFE(estimator=estimator, n_features_to_select=80)
# The *_badEncoding columns were appended after "target", so drop the label
# by name rather than slicing off the last column
X = data.drop(columns=["target"])
rfe.fit_transform(np.array(X), np.array(data["target"]))
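# Not in the original post: the columns RFE kept can be read back off the
# fitted selector's boolean support mask.
print(X.columns[rfe.support_])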

Reposted from www.cnblogs.com/daguonice/p/11420248.html