"梧桐杯" (Wutong Cup) China Mobile Big Data Application Innovation Competition - Smart City Track baseline

Open-sourcing a baseline that scores 0.827.
Not much feature engineering: read the data, inspect each feature's distribution, and apply a log transform where the distribution is long-tailed.
Drop features whose correlation with the label is below 0.05.
Bucket the features that cluster visibly in certain value ranges.
Hyperparameters come from grid search; I haven't reached the optimum yet, it's too slow.
The model is a fusion of tree ensembles: XGBoost and random forest were grid-searched, and the final submission averages random forest and CatBoost probabilities.
The comments are already quite detailed.
Since I can't break into the top 14 and won't get a slot in the semifinals, I'm open-sourcing it.
It was written in Jupyter; the .ipynb file was posted in the competition group chat.

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import warnings
warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns


# ## Load the data

# In[2]:


train_set = pd.read_csv('./train_set.csv')
train_set.head()


# ### Positive samples make up 20% of the training set

# In[3]:


train_label = pd.read_csv('./train_label.csv')
train_label[train_label['label'] == 1].shape[0] / train_label.shape[0]
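

# As a quick aside (a sketch), value_counts(normalize=True) gives the same
# ratio along with the proportion of every class at once:

# In[ ]:


train_label['label'].value_counts(normalize=True)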


# In[4]:


# Read in the test set
test = pd.read_csv('result_predict_A.csv')
test['label'] = -1
# test.info()


# In[5]:


# Build the full sample; 'flag' marks whether a row comes from train or test
train = pd.merge(train_set, train_label)
all_data = pd.concat([train, test]).reset_index(drop=True)    # DataFrame.append is deprecated
all_data['flag'] = all_data['label'].map(lambda x: 'test' if x == -1 else 'train')
# all_data['X5'].mode()
all_data['X5'] = all_data['X5'].fillna('大众用户')    # fill X5 with its mode
all_data[all_data['X6'].isnull() & (all_data['label'] == -1)]
all_data.head()
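

# Rather than hardcoding the category, the mode can be looked up directly
# (a sketch; .mode() may return several values, so take the first):

# In[ ]:


x5_mode = all_data['X5'].mode()[0]    # '大众用户' on this data
all_data['X5'] = all_data['X5'].fillna(x5_mode)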


# In[91]:


all_data.info()


# ### Test-set rows with X6 through X17 missing are assigned 0 directly
# ### X5 is filled with its mode, '大众用户'

# ### X6, X7, X8 are strongly correlated; X4 correlates strongly with user_id; X3, X32, X33 relate to X6, X7, X8

# In[6]:


train.corr()    # strong correlations: X3, X6, X7, X8, X32, X33, X24


# In[7]:


corr_dict = dict(train.corr()[train.corr() > 0.1].iloc[:, -1].dropna())
columns = list(corr_dict.keys())[:-1]    # columns whose correlation with the label exceeds 0.1
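

# The write-up above mentions dropping features whose correlation with the
# label is below 0.05; a sketch of that filter (the 0.05 cutoff is a choice):

# In[ ]:


label_corr = train.corr()['label'].abs()
weak_cols = label_corr[label_corr < 0.05].index.tolist()    # weakly correlated features
# train = train.drop(columns=weak_cols)    # uncomment to actually drop them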


# ### Further correlation analysis

# In[8]:


train['X5'] = train['X5'].fillna('大众用户')
set(train['X5'].to_list())


# ## Feature engineering

# ### Consider whether X6 through X14 should be dropped
# ### X24 and X28 are extremely strongly correlated

# In[10]:


# columns = ['X3', 'X5', 'X15', 'X16', 'X17', 'X24', 'X29', 'X32', 'X34', 'X37', 'X39']
# columns = ['X' + str(i) for i in [3,5,6,7,8,9,10,11,12,13,14,15,16,17,24,29,32,33,34,37,39]]
columns = ['X' + str(i) for i in [3,5,6,7,8,9,12,15,16,17,24,29,32,34,37,38,39,41,42,43]]
columns.append('user_id')


# ### Try adding other features

# In[11]:


all_data = all_data[columns + ['label', 'flag']]
all_data['X38'] = all_data['X38'].fillna(0)
all_data.head()


# In[12]:


all_data = all_data.dropna(axis=0, subset=['X16'])
all_data.info()


Transform long-tailed distributions with log(1 + x)

# In[13]:


all_data['X8'] = np.log(all_data['X8'].values+1)
sns.kdeplot(all_data['X8'], color="Red", shade = True)


# In[14]:


all_data['X7'] = np.log(all_data['X7'].values+1)
sns.kdeplot(all_data['X7'], color="Red", shade = True)


# In[15]:


all_data['X6'] = np.log(all_data['X6'].values+1)
sns.kdeplot(all_data['X6'], color="Red", shade = True)


# In[16]:


all_data['X9'] = np.log(all_data['X9'].values+1)
sns.kdeplot(all_data['X9'], color="Red", shade = True)


# In[17]:


all_data['X15'] = np.log(all_data['X15'].values+1)
sns.kdeplot(all_data['X15'], color="Red", shade = True)


# In[18]:


all_data['X16'] = np.log(all_data['X16'].values+1)
sns.kdeplot(all_data['X16'], color="Red", shade = True)


# New feature: bucket X16

# In[19]:


def trans(x):
    # bucket the (log-transformed) X16: x <= 1 -> 0, 1 < x < 6 -> 1, x >= 6 -> 2
    if x <= 1:
        return 0
    elif x < 6:
        return 1
    else:
        return 2
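

# The same bucketing via pd.cut (a sketch; with right=True the single point
# x == 6 lands in bucket 1 rather than 2, immaterial for these log-scaled floats):

# In[ ]:


x16_buckets = pd.cut(all_data['X16'], bins=[-np.inf, 1, 6, np.inf], labels=[0, 1, 2])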


# In[20]:


all_data['X16_range'] = all_data['X16'].apply(trans)


# In[21]:


all_data['X17'] = np.log(all_data['X17'].values+1)
sns.kdeplot(all_data['X17'], color="Red", shade = True)
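

# Refactor note: the per-column cells above all apply the same log(1 + x)
# transform; written once it would look like the loop below (a sketch; run
# either this loop or the cells above, not both, since transforming twice
# would distort the data):

# In[ ]:


# for col in ['X6', 'X7', 'X8', 'X9', 'X15', 'X16', 'X17']:
#     all_data[col] = np.log1p(all_data[col])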


# In[22]:


sns.kdeplot(all_data['X24'], color="Red", shade = True)


# In[23]:


sns.kdeplot(all_data['X29'], color="Red", shade = True)


# In[24]:


all_data.head()


# ### Fill missing values with constants read off the distributions

# In[25]:


sns.kdeplot(all_data['X3'], color="Red", shade = True)
all_data['X3'] = all_data['X3'].fillna(3)


# In[26]:


sns.kdeplot(all_data['X29'], color="Red", shade = True)    # related: X24, X32, X33
all_data['X29'] = all_data['X29'].fillna(0)


# In[35]:


sns.kdeplot(all_data['X34'], color="Red", shade = True)    # related: X24, X32, X33
all_data['X34'] = all_data['X34'].fillna(0)


# In[27]:


all_data = pd.concat([pd.get_dummies(all_data['X5']), all_data], axis=1).drop('X5', axis=1)
all_data.head()


Handle X32

# In[31]:


all_data['X32'] = np.log(all_data['X32'].values+1)
sns.kdeplot(all_data['X32'], color="Red", shade = True)


# In[ ]:


# Fill the remaining X32 gaps with a random forest regressor trained on the rows where X32 is known
from sklearn.ensemble import RandomForestRegressor
temp = all_data

# X32
known = temp[temp['X32'].notnull()]
unknown = temp[temp['X32'].isnull()]
X = known.drop(['user_id', 'X32', 'label', 'flag'], axis=1).values
y = known['X32'].values

rfr = RandomForestRegressor(random_state=0, n_estimators=100)
rfr.fit(X, y)
predict_X32 = rfr.predict(unknown.drop(['user_id', 'X32', 'label', 'flag'], axis=1).values)
all_data.loc[all_data['X32'].isnull(), 'X32'] = predict_X32
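

# Optional sanity check (a sketch, not part of the original pipeline): hold out
# some rows where X32 is known and see how well the imputer recovers them

# In[ ]:


from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

X_imp_tr, X_imp_te, y_imp_tr, y_imp_te = train_test_split(X, y, test_size=0.2, random_state=0)
rfr_check = RandomForestRegressor(random_state=0, n_estimators=100).fit(X_imp_tr, y_imp_tr)
print('imputer holdout R^2:', r2_score(y_imp_te, rfr_check.predict(X_imp_te)))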


# New feature: bucket X32

# In[124]:


def transX32(x):
    # bucket the (log-transformed, imputed) X32 by the clusters in its KDE
    if x < 2.7:
        return 0
    elif x < 3.15:
        return 1
    elif x < 3.92:
        return 2
    elif x < 4.9:    # the original 3.92-4.4 and 4.4-4.9 branches both mapped to 3
        return 3
    else:
        return 4


# In[125]:


all_data['X32_range'] = all_data['X32'].apply(transX32)


# In[128]:


del all_data['X38']


# ### Split into training, validation, and test sets

# In[137]:


train = all_data[all_data['flag'] == 'train'].drop(['flag', 'user_id'], axis=1)
test = all_data[all_data['flag'] == 'test'].drop(['label', 'flag', 'user_id'], axis=1).reset_index(drop=True)


# In[140]:


import xgboost as xgb
from tqdm import tqdm
from xgboost.sklearn import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, f1_score    # MSE, F1
import catboost as cb


X_train, X_cv, y_train, y_cv = train_test_split(train.drop(['label'], axis=1), train['label'], test_size=0.2)
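

# Variant (a sketch): with only ~20% positives, stratifying on the label keeps
# the class balance identical in both splits; random_state fixes the split

# In[ ]:


X_train, X_cv, y_train, y_cv = train_test_split(
    train.drop(['label'], axis=1), train['label'],
    test_size=0.2, stratify=train['label'], random_state=42)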


# ### Grid search

# In[758]:


param_grid = [
    {'n_estimators': list(range(100, 501, 100)), 'max_depth': list(range(2, 21, 5))}
]

rf = RandomForestClassifier()
grid_search_rf = GridSearchCV(rf, param_grid, cv=5, scoring='f1')

grid_search_rf.fit(train.drop(['label'], axis=1), train['label'])
print(grid_search_rf.best_estimator_)    # best found so far: max_depth=17, n_estimators=500


# In[760]:


param_dist = {
    'n_estimators': list(range(20, 141, 20)),        # best so far: 120
    'max_depth': list(range(2, 15, 5)),              # best so far: 7
    'learning_rate': list(np.linspace(0.01, 2, 5)),  # best so far: 0.01
    # 'subsample': list(np.linspace(0.7, 0.9, 5)),
    # 'colsample_bytree': list(np.linspace(0.5, 0.98, 3)),
    # 'min_child_weight': list(range(1, 9, 3))       # best so far: 6
}

xgb_clf = XGBClassifier()    # named to avoid shadowing the xgboost module imported as `xgb`
grid_search_xgb = GridSearchCV(xgb_clf, param_dist, cv=3, n_jobs=-1, scoring='f1')

grid_search_xgb.fit(train.drop(['label'], axis=1), train['label'])
print(grid_search_xgb.best_estimator_)
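

# Grid search over the full product is slow (as noted at the top); a randomized
# search (a sketch) samples the same space under a fixed budget

# In[ ]:


from sklearn.model_selection import RandomizedSearchCV

random_search_xgb = RandomizedSearchCV(XGBClassifier(), param_dist, n_iter=20,
                                       cv=3, n_jobs=-1, scoring='f1', random_state=0)
random_search_xgb.fit(train.drop(['label'], axis=1), train['label'])
print(random_search_xgb.best_params_)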


# ### Validation performance

# In[141]:


train = all_data[all_data['flag'] == 'train'].drop(['flag', 'user_id'], axis=1)
test = all_data[all_data['flag'] == 'test'].drop(['label', 'flag', 'user_id'], axis=1).reset_index(drop=True)
X_train, X_cv, y_train, y_cv = train_test_split(train.drop(['label'], axis=1), train['label'], test_size=0.2)


# In[142]:


rf = RandomForestClassifier(n_estimators=500, max_depth=17).fit(X_train, y_train)
print('rf F1: {}' .format(f1_score(rf.predict(X_cv), y_cv)))


# In[143]:


xgb_clf = XGBClassifier().fit(X_train, y_train)    # again avoid shadowing the `xgb` module
print('xgb F1: {}' .format(f1_score(xgb_clf.predict(X_cv), y_cv)))


# In[168]:


from sklearn.linear_model import LogisticRegression
for x in np.linspace(500, 1500, 10):
    # np.linspace yields floats, but max_iter must be an integer
    clf3 = LogisticRegression(penalty='l2', C=0.1, max_iter=int(x), tol=1e-4, solver='lbfgs').fit(X_train, y_train)
    print(int(x))
    print('lr F1: {}' .format(f1_score(clf3.predict(X_cv), y_cv)))


# In[203]:


clf4 = cb.CatBoostClassifier(n_estimators=7000).fit(X_train, y_train)
print('catboost F1: {}' .format(f1_score(clf4.predict(X_cv), y_cv)))
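

# n_estimators=7000 is large; a sketch using early stopping on the validation
# split to pick the iteration count (eval_metric and the patience of 200 rounds
# are choices of this sketch, not tuned values)

# In[ ]:


clf4_es = cb.CatBoostClassifier(n_estimators=7000, eval_metric='F1', verbose=500)
clf4_es.fit(X_train, y_train, eval_set=(X_cv, y_cv), early_stopping_rounds=200)
print('best iteration:', clf4_es.get_best_iteration())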


# In[206]:


# predict_proba[:, 0] is the probability of class 0, so an average below the
# threshold means "predict positive"
y_pred_1 = rf.predict_proba(X_cv)[:, 0]
y_pred_2 = clf4.predict_proba(X_cv)[:, 0]

y_pred = (y_pred_1 + y_pred_2) / 2
y_pred = list(map(lambda x: 1 if x < 0.62 else 0, y_pred))
print(f1_score(y_pred, y_cv))


# Sweep thresholds to find the best one
score_lst = []
for i in np.linspace(0.45, 0.75, 100):
    i = round(i, 4)
    y_pred = (y_pred_1 + y_pred_2) / 2
    y_pred = list(map(lambda x: 1 if x < i else 0, y_pred))
    score = f1_score(y_pred, y_cv)
    score_lst.append([i, score])
    print('i={}, total F1: {}' .format(i, score))

score_lst = np.array(score_lst)
plt.plot(score_lst[:, 0], score_lst[:, 1])
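

# Read off the threshold that maximized F1 in the sweep above

# In[ ]:


best_i, best_f1 = score_lst[np.argmax(score_lst[:, 1])]
print('best threshold: {}, F1: {}'.format(best_i, best_f1))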


# ### Prediction

# In[1]:


clf1 = RandomForestClassifier(n_estimators=500, max_depth=17)
clf2 = cb.CatBoostClassifier(n_estimators=5000)

clf1.fit(train.drop(['label'], axis=1), train['label'])
print('Training done')
clf2.fit(train.drop(['label'], axis=1), train['label'])


# In[232]:


# Again average the class-0 probabilities and threshold them
y_pred_1 = clf1.predict_proba(test)[:, 0]
y_pred_2 = clf2.predict_proba(test)[:, 0]

y_pred = (y_pred_1 + y_pred_2) / 2
y_pred = list(map(lambda x: 1 if x < 0.75 else 0, y_pred))


Add back the special users

# In[233]:


temp = pd.read_csv('result_predict_A.csv')
temp[temp['X16'].isnull()]    # the three test users whose X16 is NaN (their rows were dropped before modeling)


# In[190]:


extra = pd.DataFrame([['2697592699877', 0], ['2697527496793', 0], ['2697624945417', 0]], columns=['user_id', 'label'])
extra.head()


# In[234]:


result = pd.read_csv('result_predict_A.csv')
result = result.dropna(axis=0, subset=['X16'])


result['label'] = y_pred
result = result[['user_id', 'label']]

# Add back the three samples whose X16 is NaN
result = pd.concat([result, extra], ignore_index=True)

result.head()


# In[235]:


result.shape


# In[236]:


result.to_csv('./submit.csv', index=False)


Reposted from blog.csdn.net/m0_46162954/article/details/114270982