The basic idea of logistic regression

  Logistic regression is a classification method for binary (two-class) problems. Its basic idea is:

  a. Find a suitable hypothesis function (the classification function) that predicts the outcome for the input data;

  b. Construct a cost function (the loss function) that measures the deviation between the predicted output and the actual class labels of the training data;

  c. Minimize the cost function to obtain the optimal model parameters (the standard formulation is sketched below).
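For reference, here is a minimal sketch of the standard formulation these three steps refer to (an assumption based on the usual logistic-regression setup; the original text does not write the formulas out). Here $y_i \in \{0,1\}$ is the class label of sample $x_i$, $\sigma$ is the sigmoid function, and $\alpha$ is the step size:

$$h_w(x) = \sigma(w^{\top}x) = \frac{1}{1 + e^{-w^{\top}x}}$$

$$\ell(w) = \sum_{i=1}^{m}\Big[\, y_i \log h_w(x_i) + (1 - y_i)\log\big(1 - h_w(x_i)\big)\Big]$$

$$w \leftarrow w + \alpha\, X^{\top}\big(y - \sigma(Xw)\big)$$

Maximizing the log-likelihood is equivalent to minimizing the cost function of steps b and c, and the gradient-ascent update in the last line corresponds directly to the statement weights = weights + alpha * dataMatrix.transpose() * error in the gradAscent function below.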

import numpy
from numpy import *
import matplotlib.pyplot as plt
import random

# Load the data set: each row of the file is "x1 x2 label";
# a constant feature x0 = 1.0 is prepended to every sample.
def loadDataSet(filename):
    fr = open(filename)
    dataMat = []
    labelMat = []
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

# Sigmoid: a smooth approximation of the step function
def sigmoid(inX):
    return 1.0 / (1 + numpy.exp(-inX))

# Logistic regression classifier trained with (batch) gradient ascent
def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)
    labelMatrix = mat(classLabels).transpose()
    m, n = shape(dataMatrix)
    alpha = 0.001                # step size
    maxCycles = 500
    weights = ones((n, 1))
    # apply maxCycles gradient-ascent updates to the regression coefficients
    for i in range(maxCycles):
        h = sigmoid(dataMatrix * weights)
        error = labelMatrix - h
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

# Analyze the data: plot the samples and the decision boundary
def plotBestFit(weights):
    dataMat, labelMat = loadDataSet('test.txt')
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    # best-fit line: the boundary is where w0 + w1*x1 + w2*x2 = 0, i.e. sigmoid = 0.5
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()

# Stochastic gradient ascent: one update per sample, a single pass over the data
def stocGradAscent0(dataMatrix, classLabels):
    m, n = numpy.shape(dataMatrix)
    alpha = 0.01                 # step size
    weights = numpy.ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

# Improved stochastic gradient ascent: alpha decays over time and samples
# are drawn without replacement within each pass
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))           # rebuild the index pool for every pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.1  # alpha is adjusted at every update but never reaches 0
            randIndex = int(random.uniform(0, len(dataIndex)))
            chosen = dataIndex[randIndex]    # pick a sample not yet used in this pass
            h = sigmoid(sum(dataMatrix[chosen] * weights))
            error = classLabels[chosen] - h
            weights = weights + alpha * error * dataMatrix[chosen]
            del dataIndex[randIndex]
    return weights

if __name__ == '__main__':
    dataArr, labelMat = loadDataSet('test.txt')
    weights = stocGradAscent1(array(dataArr), labelMat)
    # weights = gradAscent(dataArr, labelMat)   # pass weights.getA() to plotBestFit in this case
    plotBestFit(weights)
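With the learned weights, a new sample is classified by thresholding the sigmoid output at 0.5, which is exactly what classifyVector does in the horse-colic program below. A minimal usage sketch (the feature values are hypothetical, chosen only for illustration):

# classify a new point with the weights returned above;
# x0 is the constant 1.0 that loadDataSet prepends to every sample
newPoint = array([1.0, 0.5, -1.2])        # hypothetical [x0, x1, x2]
prob = sigmoid(sum(newPoint * weights))   # estimated P(class = 1 | newPoint)
label = 1 if prob > 0.5 else 0
print("P(class=1) = %.3f -> predicted class %d" % (prob, label))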

Application: predicting the mortality of sick horses from colic symptoms

import numpy
from numpy import *
import matplotlib.pyplot as plt
import random

# Sigmoid: a smooth approximation of the step function
def sigmoid(inX):
    return 1.0 / (1 + numpy.exp(-inX))

# Classify one feature vector: class 1 if sigmoid(w . x) > 0.5, otherwise class 0
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0

# Improved stochastic gradient ascent (same as above)
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))           # rebuild the index pool for every pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.1  # alpha is adjusted at every update
            randIndex = int(random.uniform(0, len(dataIndex)))
            chosen = dataIndex[randIndex]    # pick a sample not yet used in this pass
            h = sigmoid(sum(dataMatrix[chosen] * weights))
            error = classLabels[chosen] - h
            weights = weights + alpha * error * dataMatrix[chosen]
            del dataIndex[randIndex]
    return weights

# Train on the colic training file, evaluate on the test file, return the error rate
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        curLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):                          # 21 features per horse
            lineArr.append(float(curLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(curLine[21]))    # the last column is the label
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0
    for line in frTest.readlines():
        numTestVec += 1.0
        curLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(curLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(float(curLine[21])):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("error rate:", errorRate)
    return errorRate

# Training is stochastic, so repeat the test and report the average error rate
def multiTest():
    numTests = 10
    errorSum = 0.0
    for i in range(numTests):
        errorSum += colicTest()
    print("after %d runs, the average error rate is %f" % (numTests, errorSum / float(numTests)))

if __name__ == '__main__':
    multiTest()

  
