Machine Learning Foundations Notes: Homework #3 LinReg & LogReg Exercises
Original post: http://www.jianshu.com/p/311141f2047d
Problem Description

(The problem statements appear as images in the original post.)
Implementation
13-15
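The script below draws 1000 points uniformly from \([-1,1]^2\), labels them with the noisy target (the labels of a random 10% subset are flipped), and fits the weights with the closed-form linear regression solution implemented in w_lin:

\[
f(x_1, x_2) = \operatorname{sign}\left(x_1^2 + x_2^2 - 0.6\right), \qquad
w_{\mathrm{LIN}} = \left(X^{\mathsf T} X\right)^{-1} X^{\mathsf T} y
\]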
# coding: utf-8
import numpy as np
import numpy.random as random
import matplotlib.pyplot as plt


def sign(x):
    return 1 if x >= 0 else -1


def gen_data():
    # 1000 points uniform on [-1,1]^2; target f(x) = sign(x1^2 + x2^2 - 0.6),
    # with the labels of a random 10% subset flipped as noise
    x1 = random.uniform(-1, 1, 1000)
    x2 = random.uniform(-1, 1, 1000)
    id_array = random.permutation(1000)
    dataY = np.zeros((1000, 1))
    for i in range(1000):
        idx = id_array[i]
        if i < 1000 * 0.1:  # first 10% of the shuffled order: flipped labels
            dataY[idx][0] = -sign(x1[idx] ** 2 + x2[idx] ** 2 - 0.6)
        else:
            dataY[idx][0] = sign(x1[idx] ** 2 + x2[idx] ** 2 - 0.6)
    # prepend the constant feature x0 = 1
    dataX = np.concatenate(
        (np.ones((1000, 1)), x1.reshape((1000, 1)), x2.reshape((1000, 1))),
        axis=1)
    return dataX, dataY


def w_lin(dataX, dataY):
    # closed-form linear regression: w = (X^T X)^{-1} X^T y
    dataX_T = np.transpose(dataX)
    tmp = np.dot(np.linalg.inv(np.dot(dataX_T, dataX)), dataX_T)
    return np.dot(tmp, dataY)


def pred(dataX, wLIN):
    # classify by the sign of the linear score
    pred = np.dot(dataX, wLIN)
    for i in range(dataX.shape[0]):
        pred[i][0] = sign(pred[i][0])
    return pred


def zero_one_cost(pred, dataY):
    # fraction of misclassified points (0/1 error)
    return np.sum(pred != dataY) / dataY.shape[0]


def feat_transform(dataX):
    # quadratic transform: (1, x1, x2) -> (1, x1, x2, x1*x2, x1^2, x2^2)
    num_data = dataX.shape[0]
    tmp1 = dataX[:, 1] * dataX[:, 2]
    tmp2 = dataX[:, 1] ** 2
    tmp3 = dataX[:, 2] ** 2
    new_dataX = np.concatenate(
        (dataX, tmp1.reshape((num_data, 1)), tmp2.reshape((num_data, 1)),
         tmp3.reshape((num_data, 1))), axis=1)
    return new_dataX


if __name__ == "__main__":
    # 13: linear regression on the raw features, averaged over 1000 runs
    cost_list = []
    for i in range(1000):
        dataX, dataY = gen_data()
        wLIN = w_lin(dataX, dataY)
        cost_list.append(zero_one_cost(pred(dataX, wLIN), dataY))
    # show results
    print("the average Ein over 1000 experiments: ", sum(cost_list) / len(cost_list))
    plt.figure()
    plt.hist(cost_list)
    plt.xlabel("zero_one Ein")
    plt.ylabel("frequency")
    plt.title("13")
    plt.savefig("13.png")

    # 14-15: linear regression after the quadratic feature transform
    W = []
    cost_list = []
    for i in range(1000):
        # train
        dataX, dataY = gen_data()
        dataX = feat_transform(dataX)
        wLIN = w_lin(dataX, dataY)
        W.append(wLIN[:, 0].tolist())
        # test on a freshly generated sample to estimate Eout
        testX, testY = gen_data()
        testX = feat_transform(testX)
        cost_list.append(zero_one_cost(pred(testX, wLIN), testY))
    min_cost = min(cost_list)
    min_id = cost_list.index(min_cost)
    print(W[min_id])
    W = np.array(W)
    # show w3 (the coefficient of the x1*x2 feature)
    print("the average w3 over 1000 experiments: ", np.average(W, axis=0)[3])
    plt.figure()
    plt.hist(W[:, 3].tolist())
    plt.xlabel("w3")
    plt.ylabel("frequency")
    plt.title("14")
    plt.savefig("14.png")
    # show Eout
    print("the average Eout over 1000 experiments: ", sum(cost_list) / len(cost_list))
    plt.figure()
    plt.hist(cost_list)
    plt.xlabel("Eout")
    plt.ylabel("frequency")
    plt.title("15")
    plt.savefig("15.png")
18-20
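Problems 18-20 train logistic regression on hw3_train.dat. gradient_descent below implements one full-batch step following the gradient of the cross-entropy error, and s_gradient_descent applies the same update to a single example (stochastic gradient descent):

\[
\nabla E_{\text{in}}(w) = \frac{1}{N} \sum_{n=1}^{N} \theta\!\left(-y_n w^{\mathsf T} x_n\right)\left(-y_n x_n\right),
\qquad
w_{t+1} = w_t - \eta\, \nabla E_{\text{in}}(w_t),
\]

where \(\theta(s) = 1/(1 + e^{-s})\) is the sigmoid.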
# coding: utf-8
import numpy as np


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def read_data(dataFile):
    # each line: feature values followed by the label; prepend x0 = 1
    with open(dataFile, 'r') as f:
        lines = f.readlines()
    data_list = []
    for line in lines:
        line = line.strip().split()
        data_list.append([1.0] + [float(l) for l in line])
    dataArray = np.array(data_list)
    num_data = dataArray.shape[0]
    num_dim = dataArray.shape[1] - 1
    dataX = dataArray[:, :-1].reshape((num_data, num_dim))
    dataY = dataArray[:, -1].reshape((num_data, 1))
    return dataX, dataY


def gradient_descent(w, dataX, dataY, eta):
    # one full-batch step: w <- w - eta * avg_n sigmoid(-y_n w^T x_n) * (-y_n x_n)
    assert w.shape[0] == dataX.shape[1], "wrong shape!"
    assert w.shape[1] == 1, "wrong shape of w!"
    num_dim = dataX.shape[1]
    tmp1 = -dataY * dataX                      # -y_n x_n, row-wise
    tmp2 = sigmoid(-dataY * np.dot(dataX, w))  # sigmoid(-y_n w^T x_n)
    grad = np.average(tmp1 * tmp2, axis=0)
    return w - eta * grad.reshape((num_dim, 1))


def s_gradient_descent(w, dataX, dataY, eta):
    # stochastic step: the same update applied to a single (1 x d) example
    assert w.shape[0] == dataX.shape[1], "wrong shape!"
    assert w.shape[1] == 1, "wrong shape of w!"
    assert dataX.shape[0] == 1, "wrong shape of x!"
    assert dataY.shape[0] == 1, "wrong shape of y!"
    return gradient_descent(w, dataX, dataY, eta)


def pred(wLOG, dataX):
    # label +1 if sigmoid(w^T x) >= 0.5 (i.e. w^T x >= 0), else -1
    pred = np.dot(dataX, wLOG)
    for i in range(dataX.shape[0]):
        pred[i][0] = 1 if sigmoid(pred[i][0]) >= 0.5 else -1
    return pred


def zero_one_cost(pred, dataY):
    return np.sum(pred != dataY) / dataY.shape[0]


if __name__ == "__main__":
    # train
    dataX, dataY = read_data("hw3_train.dat")
    num_dim = dataX.shape[1]

    # 18: learning rate eta = 0.001, T = 2000 iterations
    print("\n18")
    w = np.zeros((num_dim, 1))
    for i in range(2000):
        w = gradient_descent(w, dataX, dataY, eta=0.001)
    print("the weight vector within g: ", w[:, 0])
    # test
    testX, testY = read_data("hw3_test.dat")
    Eout = zero_one_cost(pred(w, testX), testY)
    print("the Eout(g) on the test set: ", Eout)

    # 18.1: same eta, 10x more iterations
    print("\n18.1")
    w = np.zeros((num_dim, 1))
    for i in range(20000):
        w = gradient_descent(w, dataX, dataY, eta=0.001)
    print("the weight vector within g: ", w[:, 0])
    Eout = zero_one_cost(pred(w, testX), testY)
    print("the Eout(g) on the test set: ", Eout)

    # 19: larger learning rate eta = 0.01, T = 2000
    print("\n19")
    w = np.zeros((num_dim, 1))
    for i in range(2000):
        w = gradient_descent(w, dataX, dataY, eta=0.01)
    print("the weight vector within g: ", w[:, 0])
    Eout = zero_one_cost(pred(w, testX), testY)
    print("the Eout(g) on the test set: ", Eout)

    # 20: stochastic gradient descent, cycling through the examples in order
    print("\n20")
    w = np.zeros((num_dim, 1))
    num_data = dataX.shape[0]
    for i in range(2000):
        n = i % num_data
        x = dataX[n, :].reshape((1, num_dim))
        y = dataY[n, :].reshape((1, 1))
        w = s_gradient_descent(w, x, y, eta=0.001)
    print("the weight vector within g: ", w[:, 0])
    Eout = zero_one_cost(pred(w, testX), testY)
    print("the Eout(g) on the test set: ", Eout)
Results and Analysis
13-15
(Result histograms 13.png, 14.png, and 15.png produced by the script above: Ein for problem 13, w3 for problem 14, and Eout for problem 15.)
18-20
(Console output of the 18-20 script: the learned weight vectors and the corresponding Eout values.)
Comparing 18 with 18.1 shows that when the step size is small, many more iterations are needed before gradient descent reaches a comparably good result.
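A minimal sketch of one way to visualize this (an addition, not part of the original homework code): it assumes hw3_train.dat is present and that read_data, gradient_descent, pred, and zero_one_cost from the 18-20 script above are in scope, and plots the 0/1 in-sample error after each iteration for both learning rates.

# a minimal sketch: Ein vs. iteration for eta = 0.001 vs. eta = 0.01
# assumes read_data, gradient_descent, pred, and zero_one_cost are defined
# as in the 18-20 script above, and hw3_train.dat is in the working directory
import numpy as np
import matplotlib.pyplot as plt

dataX, dataY = read_data("hw3_train.dat")
num_dim = dataX.shape[1]
for eta in (0.001, 0.01):
    w = np.zeros((num_dim, 1))
    ein_curve = []
    for t in range(2000):
        w = gradient_descent(w, dataX, dataY, eta=eta)
        ein_curve.append(zero_one_cost(pred(w, dataX), dataY))
    plt.plot(ein_curve, label="eta = %g" % eta)
plt.xlabel("iteration")
plt.ylabel("zero_one Ein")
plt.legend()
plt.savefig("ein_vs_iteration.png")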