Logistic Regression
The basic idea of logistic regression
Logistic regression is a classification method for two-class (binary) problems. Its basic idea is:
a. Find a suitable hypothesis function, i.e. the classification function, used to predict the class of an input sample;
b. Construct a cost function, i.e. the loss function, that measures the deviation between the predicted output and the actual class labels of the training data;
c. Minimize the cost function to obtain the optimal model parameters (a sketch of the corresponding formulas follows this list).
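A minimal sketch of these three steps in standard notation (the symbols θ, α, X, y and the superscript (i) are labels I introduce here; the post itself fixes no notation):

\sigma(z) = \frac{1}{1 + e^{-z}}, \qquad h_\theta(x) = \sigma(\theta^T x)

\ell(\theta) = \sum_{i=1}^{m} \Big[ y^{(i)} \log h_\theta(x^{(i)}) + \big(1 - y^{(i)}\big) \log\big(1 - h_\theta(x^{(i)})\big) \Big]

\theta \leftarrow \theta + \alpha \, X^T \big( y - \sigma(X\theta) \big)

Maximizing the log-likelihood ℓ(θ) is the same as minimizing the cost −ℓ(θ), and the last update rule is exactly the weight-update line used in gradAscent in the code below.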
import numpy
from numpy import *
import matplotlib.pyplot as plt
import random

# load the data set: each line holds two feature values and an integer class label
def loadDataSet(filename):
    fr = open(filename)
    dataMat = []
    labelMat = []
    for line in fr.readlines():
        lineArr = line.strip().split()
        # prepend the constant 1.0 so that weights[0] acts as the intercept
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

# sigmoid (logistic) function
def sigmoid(inX):
    return 1.0 / (1 + numpy.exp(-inX))

# logistic regression classifier based on batch gradient ascent
def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)
    labelMatrix = mat(classLabels).transpose()
    m, n = shape(dataMatrix)
    alpha = 0.001        # step size
    maxCycles = 500
    weights = ones((n, 1))
    # run maxCycles gradient-ascent updates on the regression coefficients
    for i in range(maxCycles):
        h = sigmoid(dataMatrix * weights)   # predictions for all samples
        error = labelMatrix - h             # deviation from the true labels
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

# analyze the data: plot the decision boundary
def plotBestFit(weights):
    dataMat, labelMat = loadDataSet('test.txt')
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    # best-fit line: the points where weights[0] + weights[1]*x1 + weights[2]*x2 = 0
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()

# stochastic gradient ascent: update the weights with one sample at a time
def stocGradAscent0(dataMatrix, classLabels):
    m, n = numpy.shape(dataMatrix)
    alpha = 0.01         # step size
    weights = numpy.ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

# improved stochastic gradient ascent
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))   # indices of the samples not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.1   # alpha is adjusted on every iteration
            # pick one of the remaining samples at random (sampling without replacement)
            randIndex = int(random.uniform(0, len(dataIndex)))
            sample = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sample] * weights))
            error = classLabels[sample] - h
            weights = weights + alpha * error * dataMatrix[sample]
            del dataIndex[randIndex]
    return weights

if __name__ == '__main__':
    dataArr, labelMat = loadDataSet('test.txt')
    weights = stocGradAscent1(array(dataArr), labelMat)
    # weights = gradAscent(dataArr, labelMat)
    plotBestFit(weights)
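Note that gradAscent returns a NumPy matrix of shape (n, 1), while plotBestFit indexes weights like a 1-D array. If you switch to the commented-out gradAscent call, it is safer to convert the matrix first; a minimal sketch, assuming the same test.txt file as in __main__:

dataArr, labelMat = loadDataSet('test.txt')
matWeights = gradAscent(dataArr, labelMat)   # numpy matrix of shape (3, 1)
plotBestFit(matWeights.getA())               # getA() returns the underlying ndarray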
Application: predicting the mortality of horses with colic
import numpy
from numpy import *
import matplotlib.pyplot as plt
import random

# sigmoid (logistic) function
def sigmoid(inX):
    return 1.0 / (1 + numpy.exp(-inX))

# classification function: probability above 0.5 is class 1, otherwise class 0
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0

# improved stochastic gradient ascent (same as above)
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))   # indices of the samples not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.1   # alpha is adjusted on every iteration
            # pick one of the remaining samples at random (sampling without replacement)
            randIndex = int(random.uniform(0, len(dataIndex)))
            sample = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sample] * weights))
            error = classLabels[sample] - h
            weights = weights + alpha * error * dataMatrix[sample]
            del dataIndex[randIndex]
    return weights

# train on the training file, test on the test file, and return the error rate
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        curLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(curLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(curLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        curLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(curLine[i]))
        # convert via float() first in case the label is stored as e.g. "1.0"
        if int(classifyVector(array(lineArr), trainWeights)) != int(float(curLine[21])):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("error rate:", errorRate)
    return errorRate

# run colicTest() several times and report the average error rate
def multiTest():
    numTests = 10
    errorSum = 0.0
    for i in range(numTests):
        errorSum += colicTest()
    print("after %d runs, the average error rate is %f" % (numTests, errorSum / float(numTests)))

multiTest()