# NLP practice exercise (NLP练习)

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor,BaggingRegressor
from sklearn.model_selection import cross_val_score
from nltk.stem.snowball import SnowballStemmer


# ------------- Load data -------------
# Read the train/test sets (paths are placeholders — fill in before running)
df_train = pd.read_csv(' ', encoding='ISO-8859-1')
df_test = pd.read_csv(' ', encoding='ISO-8859-1')
df_desc = pd.read_csv(' ')

# Quick look at what the data shapes like
df_train.head()
df_desc.head()

# Stack train on top of test so every text-preprocessing step runs once
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
df_all.head()

# Attach the product descriptions as well
df_all = pd.merge(df_all, df_desc, how='left', on='product_uid')
df_all.head()

#-------------1. Text preprocessing---------------
# Snowball stemmer reduces each English word to its stem
stemmer = SnowballStemmer('english')

def str_stemmer(s):
    """Lower-case *s*, split on whitespace, and stem every token.

    Coerces non-string input to ``str`` first: the ``how='left'`` merge
    above can leave NaN in ``product_description``, and the original
    code crashed with AttributeError on ``float.lower()`` in that case.
    """
    if not isinstance(s, str):  # guard against NaN / numeric cells
        s = str(s)
    return ' '.join(stemmer.stem(word) for word in s.lower().split())

# Measure keyword effectiveness: how many query tokens show up in the text
def str_common_word(str1, str2):
    """Return how many whitespace-separated tokens of *str1* occur
    anywhere inside *str2* (substring match, so 'cat' matches 'category')."""
    hits = 0
    for token in str1.split():
        if token in str2:
            hits += 1
    return hits

# Run the text cleaner over every text column
for col in ('search_term', 'product_title', 'product_description'):
    df_all[col] = df_all[col].map(str_stemmer)

#-------------------2. Hand-crafted text features-----------------
# Query length in tokens
df_all['len_of_query'] = df_all['search_term'].map(lambda q: len(q.split())).astype(np.int64)
# How many query tokens overlap with the title
df_all['commons_in_title'] = df_all.apply(
    lambda row: str_common_word(row['search_term'], row['product_title']), axis=1)
# How many query tokens overlap with the description
df_all['commons_in_desc'] = df_all.apply(
    lambda row: str_common_word(row['search_term'], row['product_description']), axis=1)

# Drop the raw text columns that an ML model cannot consume
df_all = df_all.drop(['search_term', 'product_title', 'product_description'], axis=1)


#--------------------3. Rebuild train/test sets-----------------
# BUG FIX: pd.concat(..., ignore_index=True) renumbered df_all's rows
# 0..len(train)+len(test)-1, while the original df_test.index is still
# 0..len(test)-1. Indexing with `df_all.loc[df_test.index]` therefore
# returned TRAINING rows as the "test" set. The train rows come first in
# df_all, so split positionally instead.
num_train = len(df_train)
df_train = df_all.iloc[:num_train]
df_test = df_all.iloc[num_train:]
# Keep the test ids for the submission file
test_ids = df_test['id']
# Separate out the target variable
y_train = df_train['relevance'].values
# Drop id/label columns to form the feature matrices
X_train = df_train.drop(['id', 'relevance'], axis=1).values
X_test = df_test.drop(['id', 'relevance'], axis=1).values

#----------------------4. Model selection-------------------------
# Sweep the tree depth; score each candidate by 5-fold CV RMSE
params = [1, 3, 5, 6, 7, 8, 9, 10]
test_scores = []
for depth in params:
    model = RandomForestRegressor(n_estimators=30, max_depth=depth)
    # cross_val_score returns negated MSE, so flip the sign and take sqrt
    fold_rmse = np.sqrt(-cross_val_score(model, X_train, y_train, cv=5,
                                         scoring='neg_mean_squared_error'))
    test_scores.append(np.mean(fold_rmse))

# Plot CV error against max_depth to eyeball the best parameter
import matplotlib.pyplot as plt
plt.plot(params, test_scores)
plt.title('Param vs CV Error')
# FIX: in a plain (non-interactive) script the figure is never rendered
# without an explicit show() call.
plt.show()

#---------------------5. Train final model & write submission--------------
# Refit on the full training set with the depth chosen above
final_model = RandomForestRegressor(n_estimators=30, max_depth=6)
final_model.fit(X_train, y_train)
predictions = final_model.predict(X_test)
submission = pd.DataFrame({'id': test_ids, 'relevance': predictions})
submission.to_csv('submission.csv', index=False)

# (Blog-template navigation residue removed: "猜你喜欢" / "NLP" links.)
# Reposted from: blog.csdn.net/weixin_39667003/article/details/85286990