Hands-on case study: predicting Titanic survivors

Steps:
1. Load the data
2. Inspect the feature information of the data
3. Feature selection: pclass (ticket class), sex, age
4. Fill missing values in the age column
5. Encode pclass and sex
6. Split the dataset into a training set and a test set
7. Load the algorithm and build the model
8. Predict
9. Evaluate

# coding=utf-8
# @Time    : 2019/12/1 20:49
# @Author  : Z
# @Email   : S
# @File    : 1.0Tantanic.py

# Predict Titanic survivors with several algorithms

import pandas as pd
# 1. Load the data
# sep: field delimiter
tantanic = pd.read_csv("./tantanic.txt", sep=",")
# 2. Inspect the feature information
print(tantanic.head())
print(tantanic.info())
# row.names    1313 non-null int64
# pclass       1313 non-null object
# survived     1313 non-null int64
# name         1313 non-null object
# age          633 non-null float64
# embarked     821 non-null object
# home.dest    754 non-null object
# room         77 non-null object
# ticket       69 non-null object
# boat         347 non-null object
# sex          1313 non-null object
# Print the column names
print(tantanic.columns)
# Index(['row.names', 'pclass', 'survived', 'name', 'age', 'embarked',
#        'home.dest', 'room', 'ticket', 'boat', 'sex'],
# Number of rows and columns
print(tantanic.shape) #(1313, 11)
# 3. Feature selection: pclass (ticket class), sex, age
X = tantanic[["pclass", "age", "sex"]].copy()  # copy() so filling age below does not warn about modifying a slice
y = tantanic["survived"]
print(X.info())
# Data columns (total 3 columns):
# pclass    1313 non-null object
# age       633 non-null float64
# sex       1313 non-null object
# dtypes: float64(1), object(2)
import numpy as np
print(np.unique(y)) #[0,1]
# 4. Fill missing values in the age column with the mean age
X["age"] = X["age"].fillna(X["age"].mean())
print(X.info())
# pclass    1313 non-null object
# age       1313 non-null float64
# sex       1313 non-null object
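# Optional sanity check (a sketch, not in the original script, commented out):
# confirm that no missing values remain in the selected features before splitting.
# print(X.isnull().sum())  # expected: 0 for pclass, age and sex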
# 6. Split the dataset (done before step 5 so the DictVectorizer is fitted on the training data only)
from sklearn.model_selection import train_test_split
# train_test_split randomly splits the data into training and test subsets and
# returns the split feature sets together with their labels.
# First two arguments: the feature matrix and the labels to split
# test_size: a float in (0, 1) is the proportion of test samples; an integer is the number of samples
# random_state: the random seed.
# The seed identifies the random sequence: repeating an experiment with the same seed
# (and otherwise identical arguments) reproduces exactly the same split, while leaving
# it unset (None) gives a different split on every run.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=33)
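# A quick reproducibility check (a sketch, commented out): calling the split again
# with the same random_state returns exactly the same partitions.
# X_a, _, y_a, _ = train_test_split(X, y, test_size=0.2, random_state=33)
# X_b, _, y_b, _ = train_test_split(X, y, test_size=0.2, random_state=33)
# assert X_a.equals(X_b) and y_a.equals(y_b)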
# 5. Encode pclass and sex with DictVectorizer (age is numeric and passes through)
from sklearn.feature_extraction import DictVectorizer
# Instantiate the vectorizer
# sparse=False returns a dense array instead of a sparse matrix
dv = DictVectorizer(sparse=False)
# fit_transform: learn the feature-name-to-index mapping from the training records and
# transform them into one-hot/numeric feature vectors.
X_train_dict = dv.fit_transform(X_train.to_dict(orient="records"))
# transform: apply the mapping learned on the training set to the test records.
X_test_dict = dv.transform(X_test.to_dict(orient="records"))
# print(X_train_dict)
# print(dv.feature_names_)
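# Illustration of what DictVectorizer produces here (a sketch with made-up records,
# commented out): string-valued keys such as pclass and sex are one-hot encoded,
# while the numeric age value is passed through as-is.
# demo = DictVectorizer(sparse=False)
# print(demo.fit_transform([{"pclass": "1st", "age": 30.0, "sex": "female"},
#                           {"pclass": "3rd", "age": 22.0, "sex": "male"}]))
# print(demo.feature_names_)  # e.g. ['age', 'pclass=1st', 'pclass=3rd', 'sex=female', 'sex=male']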
# 7. Load the algorithm and build the model
from sklearn.tree import DecisionTreeClassifier
# Other classifiers that were tried (uncomment one to reproduce its scores):
# tmc = DecisionTreeClassifier(criterion="gini")
# # model in train set score: 0.8685714285714285
# # model in test set score: 0.779467680608365
# from sklearn.ensemble import RandomForestClassifier
# tmc = RandomForestClassifier(n_estimators=100)
# # model in train set score: 0.8685714285714285
# # model in test set score: 0.7908745247148289
# from sklearn.ensemble import BaggingClassifier
# tmc = BaggingClassifier()
# # model in train set score: 0.8628571428571429
# # model in test set score: 0.7870722433460076
# from sklearn.ensemble import AdaBoostClassifier
# tmc = AdaBoostClassifier(learning_rate=1)
# # model in train set score: 0.8457142857142858
# # model in test set score: 0.7870722433460076
from sklearn.ensemble import GradientBoostingClassifier
tmc = GradientBoostingClassifier(learning_rate=0.01)
tmc.fit(X_train_dict, y_train)
# model in train set score: 0.8361904761904762
# model in test set score: 0.8060836501901141
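# A compact way to compare all the candidates above (a sketch using the data
# prepared earlier; commented out so the script output stays the same):
# from sklearn.ensemble import (RandomForestClassifier, BaggingClassifier,
#                               AdaBoostClassifier)
# for model in (DecisionTreeClassifier(criterion="gini"),
#               RandomForestClassifier(n_estimators=100),
#               BaggingClassifier(),
#               AdaBoostClassifier(learning_rate=1),
#               GradientBoostingClassifier(learning_rate=0.01)):
#     model.fit(X_train_dict, y_train)
#     print(type(model).__name__, model.score(X_test_dict, y_test))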
# 8. Predict
y_train_pred = tmc.predict(X_train_dict)
y_pred = tmc.predict(X_test_dict)
print(y_train_pred)
# Predictions on the test set
print(y_pred)
# 9. Evaluate: score() returns the mean accuracy
print("model in train set score:", tmc.score(X_train_dict, y_train))
print("model in test set score:", tmc.score(X_test_dict, y_test))

# Additional metrics: confusion matrix and classification report
from sklearn.metrics import confusion_matrix
print("model in train confusion matrix:\n", confusion_matrix(y_train, y_train_pred))
print("model in test confusion matrix:\n", confusion_matrix(y_test, y_pred))
# classification_report
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
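# classification_report lists precision, recall, f1-score and support for each
# class (0 = did not survive, 1 = survived) plus averaged summaries.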