吴裕雄 Python Machine Learning — Model Selection: Dataset Splitting
import numpy as np
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold, LeaveOneOut, cross_val_score  # model selection / dataset splitting: train_test_split
def test_train_test_split():
    X = [[1, 2, 3, 4],
         [11, 12, 13, 14],
         [21, 22, 23, 24],
         [31, 32, 33, 34],
         [41, 42, 43, 44],
         [51, 52, 53, 54],
         [61, 62, 63, 64],
         [71, 72, 73, 74]]
    y = [1, 1, 0, 0, 1, 1, 0, 0]
    # Plain split: the test set is 40% of the original data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
    print("X_train=", X_train)
    print("X_test=", X_test)
    print("y_train=", y_train)
    print("y_test=", y_test)
    # Stratified split: the test set is 40% of the original data,
    # and the class proportions of y are preserved in both subsets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0, stratify=y)
    print("Stratify:X_train=", X_train)
    print("Stratify:X_test=", X_test)
    print("Stratify:y_train=", y_train)
    print("Stratify:y_test=", y_test)

test_train_test_split()

# Model selection / dataset splitting: KFold
def test_KFold():
    X = np.array([[1, 2, 3, 4],
                  [11, 12, 13, 14],
                  [21, 22, 23, 24],
                  [31, 32, 33, 34],
                  [41, 42, 43, 44],
                  [51, 52, 53, 54],
                  [61, 62, 63, 64],
                  [71, 72, 73, 74],
                  [81, 82, 83, 84]])
    y = np.array([1, 1, 0, 0, 1, 1, 0, 0, 1])
    # Do not shuffle the data before splitting
    # (random_state is omitted here: recent scikit-learn versions reject it when shuffle=False)
    folder = KFold(n_splits=3, shuffle=False)
    for train_index, test_index in folder.split(X, y):
        print("Train Index:", train_index)
        print("Test Index:", test_index)
        print("X_train:", X[train_index])
        print("X_test:", X[test_index])
        print("")
    # Shuffle the data before splitting
    shuffle_folder = KFold(n_splits=3, random_state=0, shuffle=True)
    for train_index, test_index in shuffle_folder.split(X, y):
        print("Shuffled Train Index:", train_index)
        print("Shuffled Test Index:", test_index)
        print("Shuffled X_train:", X[train_index])
        print("Shuffled X_test:", X[test_index])
        print("")

test_KFold()

# Model selection / dataset splitting: StratifiedKFold
def test_StratifiedKFold():
    X = np.array([[1, 2, 3, 4],
                  [11, 12, 13, 14],
                  [21, 22, 23, 24],
                  [31, 32, 33, 34],
                  [41, 42, 43, 44],
                  [51, 52, 53, 54],
                  [61, 62, 63, 64],
                  [71, 72, 73, 74]])
    y = np.array([1, 1, 0, 0, 1, 1, 0, 0])
    # Plain KFold versus StratifiedKFold on the same data
    # (random_state is omitted because shuffle=False)
    folder = KFold(n_splits=4, shuffle=False)
    stratified_folder = StratifiedKFold(n_splits=4, shuffle=False)
    for train_index, test_index in folder.split(X, y):
        print("Train Index:", train_index)
        print("Test Index:", test_index)
        print("y_train:", y[train_index])
        print("y_test:", y[test_index])
        print("")
    for train_index, test_index in stratified_folder.split(X, y):
        print("Stratified Train Index:", train_index)
        print("Stratified Test Index:", test_index)
        print("Stratified y_train:", y[train_index])
        print("Stratified y_test:", y[test_index])
        print("")

test_StratifiedKFold()

# Model selection / dataset splitting: LeaveOneOut
def test_LeaveOneOut():
    X = np.array([[1, 2, 3, 4],
                  [11, 12, 13, 14],
                  [21, 22, 23, 24],
                  [31, 32, 33, 34]])
    y = np.array([1, 1, 0, 0])
    lo = LeaveOneOut()
    # Each sample is held out exactly once as the single test example
    for train_index, test_index in lo.split(X):
        print("Train Index:", train_index)
        print("Test Index:", test_index)
        print("X_train:", X[train_index])
        print("X_test:", X[test_index])
        print("")

test_LeaveOneOut()

# Model selection / dataset splitting: cross_val_score
def test_cross_val_score():
    from sklearn.datasets import load_digits
    from sklearn.svm import LinearSVC
    digits = load_digits()  # load a classification dataset
    X = digits.data
    y = digits.target
    # Use LinearSVC as the classifier and evaluate with 10-fold cross-validation
    result = cross_val_score(LinearSVC(), X, y, cv=10)
    print("Cross Val Score is:", result)

test_cross_val_score()
