Machine Learning: Titanic (with annotated code)
A classic hands-on machine learning exercise.
Using Python, we apply linear regression, logistic regression, random forests, and ensembling/stacking to predict passenger survival.
The code is based on Tang Yudi's tutorial, modified mainly to track updates in the libraries used so that it runs on current versions. All of the code has been run recently and verified to work.
Load the training set (891 rows)
import pandas
titanic = pandas.read_csv("titanic_train.csv")
titanic.head(6)
Fill missing values and convert strings to numbers
print(titanic.describe())
# Fill missing ages with the median age
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
titanic.head(6)
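For reference, the same median imputation can be done with scikit-learn's SimpleImputer; a minimal sketch, not part of the original tutorial:
from sklearn.impute import SimpleImputer

# Fill missing ages with the median, as above (illustrative alternative only)
imputer = SimpleImputer(strategy="median")
titanic[["Age"]] = imputer.fit_transform(titanic[["Age"]])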
# Show the possible values
print(titanic["Sex"].unique())
# Replace the values in the Sex column with numbers
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1
# For a string column with NaN values, fill the gaps with the most frequent value
# Then map everything to 0, 1, 2
print(titanic["Embarked"].unique())
titanic["Embarked"] = titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"]=="C","Embarked"]=1
titanic.loc[titanic["Embarked"]=="Q","Embarked"]=2
Linear Regression
# Import linear regression and cross-validation utilities
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
# Columns we use to make predictions
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Initialize the algorithm class
alg = LinearRegression()
# Set up cross-validation on the Titanic dataset; each split returns the row indices for train and test
# Note: n_folds was renamed to n_splits in newer scikit-learn, and the other old parameters were dropped
kf = KFold(n_splits=3)
predictions = []
for train, test in kf.split(titanic):
    # The feature data for this fold
    train_predictors = titanic[predictors].iloc[train, :]
    # The corresponding Survived values
    train_target = titanic["Survived"].iloc[train]
    # Train
    alg.fit(train_predictors, train_target)
    # Predict on the held-out fold
    test_predictions = alg.predict(titanic[predictors].iloc[test, :])
    # Collect this fold's predictions
    predictions.append(test_predictions)
# print(predictions)
Print the accuracy
import numpy as np
# Concatenate the fold predictions into one array
predictions = np.concatenate(predictions, axis=0)
# Threshold at 0.5 to turn the regression outputs into 0/1 survival labels
predictions[predictions > .5] = 1
predictions[predictions <= .5] = 0
# Accuracy
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
print(accuracy)
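For comparison, scikit-learn's cross_val_predict produces the same out-of-fold predictions in one call; a minimal sketch, not part of the original tutorial:
from sklearn.model_selection import cross_val_predict

# Out-of-fold predictions for every row, then the same 0.5 threshold as above
oof = cross_val_predict(LinearRegression(), titanic[predictors], titanic["Survived"], cv=3)
accuracy = sum((oof > .5).astype(int) == titanic["Survived"]) / len(oof)
print(accuracy)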
Evaluate on the real test set
titanic_test = pandas.read_csv("test.csv")
titanic_test["Age"]= titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"]= titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"]=="male","Sex"]=0
titanic_test.loc[titanic_test["Sex"]=="female","Sex"]=1
titanic_test["Embarked"]= titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"]=="S","Embarked"]=0
titanic_test.loc[titanic_test["Embarked"]=="C","Embarked"]=1
titanic_test.loc[titanic_test["Embarked"]=="Q","Embarked"]=2
predictors =["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]
prediction =alg.predict(titanic_test[predictors])
prediction[prediction >.5]=1
prediction[prediction <=.5]=0
# Note: this assumes the test file includes a Survived column; Kaggle's official test.csv does not
accuracy = sum(prediction == titanic_test["Survived"]) / len(prediction)
print(accuracy)
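Since Kaggle's test.csv carries no Survived column, the accuracy above can only be computed with labeled test data. For a Kaggle submission you would instead write the predictions to a CSV; a sketch assuming the standard Kaggle column names:
# Build a submission file with PassengerId and the predicted Survived label
submission = pandas.DataFrame({
    "PassengerId": titanic_test["PassengerId"],
    "Survived": prediction.astype(int)
})
submission.to_csv("kaggle_submission.csv", index=False)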
Logistic Regression
# sklearn's old cross_validation module has been removed; use model_selection instead
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
# Initialize the algorithm
alg = LogisticRegression()
# Compute the cross-validated accuracy scores directly with the built-in helper
scores = model_selection.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
# Take the mean of the fold scores
print(scores.mean())
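Logistic regression tends to converge more reliably on standardized features; a minimal sketch using a scikit-learn Pipeline (an addition, not part of the original tutorial):
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Standardize the features before fitting; max_iter raised to avoid convergence warnings
pipe = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
scores = model_selection.cross_val_score(pipe, titanic[predictors], titanic["Survived"], cv=3)
print(scores.mean())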
Random Forest
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier  # a random forest is a strong first choice
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# n_estimators=10: number of trees; min_samples_split=2: do not split a node with fewer than 2 samples; min_samples_leaf=1: each leaf keeps at least 1 sample
alg = RandomForestClassifier(n_estimators=10, min_samples_split=2, min_samples_leaf=1)
# Compute the cross-validated accuracy scores directly with the built-in helper
kf = model_selection.KFold(n_splits=3)
scores = model_selection.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
# Take the mean of the fold scores
print(scores.mean())
Tuning the parameters
# Increase the number of trees and raise the split/leaf minimums to reduce overfitting
# The parameters need repeated tuning to find the best combination
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=4, min_samples_leaf=2)
kf = model_selection.KFold(n_splits=3)
scores = model_selection.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
# Take the mean of the fold scores
print(scores.mean())
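Instead of tuning by hand, the grid of candidate parameters can be searched automatically; a sketch with GridSearchCV (the grid values here are illustrative, not from the original tutorial):
from sklearn.model_selection import GridSearchCV

param_grid = {
    "n_estimators": [50, 100, 150],
    "min_samples_split": [2, 4, 8],
    "min_samples_leaf": [1, 2, 4],
}
# 3-fold cross-validation over every parameter combination
search = GridSearchCV(RandomForestClassifier(random_state=1), param_grid, cv=3)
search.fit(titanic[predictors], titanic["Survived"])
print(search.best_params_, search.best_score_)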
Engineering new features
New features: family size and name length
# Derive new features
# Create a FamilySize column
titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"]
# Use the length of the name as a feature as well
titanic["NameLength"] = titanic["Name"].apply(lambda x: len(x))
New feature: title
import re

# Function that extracts a passenger's title from their name
def get_title(name):
    # Search for the title with a regular expression; titles consist of letters and end with a '.'
    title_search = re.search(' ([A-Za-z]+)\.', name)
    # If a title exists, extract and return it
    if title_search:
        return title_search.group(1)
    return ""
# Collect all the titles and print how often each one occurs
# (pandas.value_counts was deprecated; call value_counts on the Series instead)
titles = titanic["Name"].apply(get_title)
print(titles.value_counts())
# Map each title to an integer
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Major": 7, "Col": 7, "Mlle": 8, "Mme": 8, "Don": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Sir": 9, "Capt": 7, "Ms": 2}
for k, v in title_mapping.items():
    titles[titles == k] = v
# Verify that we converted everything
print(titles.value_counts())
# Add a Title column
titanic["Title"] = titles
Visualizing feature importance
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
import matplotlib.pyplot as plt
predictors =["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked","FamilySize","Title","NameLength"]
#进⾏特征选择
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic["Survived"])
#获取每个特征的原始p值,并将p值转换为分数
scores =-np.log10(selector.pvalues_)
#画图
plt.bar(range(len(predictors)), scores)async函数返回值
plt.show()
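As written, the bars are unlabeled; if you want the feature names on the x-axis, a small sketch (an optional addition):
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation="vertical")
plt.show()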
# Keep only the four best features
predictors = ["Pclass", "Sex", "Fare", "Title"]
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=8, min_samples_leaf=4)
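The four features above are hardcoded from reading the plot; they can also be picked programmatically with the selector's support mask. A sketch that refits SelectKBest with k=4 on the full feature list:
# Refit on all candidate features and print the names of the top 4
all_predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked", "FamilySize", "Title", "NameLength"]
selector = SelectKBest(f_classif, k=4).fit(titanic[all_predictors], titanic["Survived"])
print(np.array(all_predictors)[selector.get_support()])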
Stacking/ensembling (usually performs best)
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
# The algorithms to ensemble
# Some parameters were added for the logistic regression
algorithms = [
    [GradientBoostingClassifier(n_estimators=25, max_depth=3),
     ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title"]],
    [LogisticRegression(),
     ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]]
]
# Initialize the cross-validation
kf = KFold(n_splits=3)
predictions = []
for train, test in kf.split(titanic):
    train_target = titanic["Survived"].iloc[train]
    full_test_predictions = []
    for alg, predictors in algorithms:
        # Train the algorithm on the training folds
        alg.fit(titanic[predictors].iloc[train, :], train_target)
        # Select the test rows and predict
        # .astype(float) converts the dataframe to float and avoids a sklearn error
        test_predictions = alg.predict_proba(titanic[predictors].iloc[test, :].astype(float))[:, 1]
        full_test_predictions.append(test_predictions)
    # Use a simple ensembling scheme: average the two predictions to get the final classification
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    test_predictions[test_predictions <= .5] = 0
    test_predictions[test_predictions > .5] = 1
    predictions.append(test_predictions)
# Put all the fold predictions into one array
predictions = np.concatenate(predictions, axis=0)
# Accuracy
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
print(accuracy)
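Current scikit-learn also ships a built-in soft-voting ensemble that averages predicted probabilities, much like the manual loop above; a rough equivalent as a sketch, using one shared feature list:
from sklearn.ensemble import VotingClassifier

features = ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title"]
voter = VotingClassifier(
    estimators=[
        ("gbm", GradientBoostingClassifier(n_estimators=25, max_depth=3)),
        ("lr", LogisticRegression(max_iter=1000)),
    ],
    voting="soft",  # average the predicted probabilities, as in the manual version
)
scores = model_selection.cross_val_score(voter, titanic[features].astype(float), titanic["Survived"], cv=3)
print(scores.mean())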