Logistic Regression in Action: Predicting Horse Colic Mortality

Train and test the classifier with sklearn.linear_model.LogisticRegression.
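
The sklearn workflow needed here is just two calls: fit() on the training samples and score() on the held-out samples. A minimal sketch with made-up toy data (the names X_train, y_train, X_test, y_test are placeholders, not part of the horse-colic code below):

from sklearn.linear_model import LogisticRegression

# toy stand-ins for the parsed horse-colic feature rows and 0/1 labels
X_train = [[0.0, 1.0], [1.0, 0.0], [0.2, 0.9], [0.9, 0.1]]
y_train = [1.0, 0.0, 1.0, 0.0]
X_test = [[0.1, 0.8], [0.8, 0.2]]
y_test = [1.0, 0.0]

clf = LogisticRegression(solver='liblinear')   # same solver as in the full listing
clf.fit(X_train, y_train)                      # learn the weights
print('accuracy: %f%%' % (clf.score(X_test, y_test) * 100))  # fraction classified correctly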

Example code:

import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.linear_model import LogisticRegression


def stocGradAscent1(dataMatrix, classLabels, numIter=150):  # stochastic gradient ascent
    m,n = np.shape(dataMatrix)  # m: number of samples, n: number of features
    weights = np.ones(n)  # initialize all weights to 1
    for j in range(numIter):
        dataIndex = list(range(m))  # indices of samples not yet used in this pass
        for i in range(m):
            alpha = 4/(1.0+j+i)+0.01  # shrink alpha as iterations proceed; the +0.01 keeps it from reaching 0
            randIndex = int(random.uniform(0,len(dataIndex)))  # pick a random remaining sample
            sample = dataIndex[randIndex]  # map the position back to the actual sample index
            h = sigmoid(sum(dataMatrix[sample]*weights))  # predicted probability for that sample
            error = classLabels[sample] - h  # prediction error
            weights = weights + alpha * error * dataMatrix[sample]  # update the regression coefficients
            del(dataIndex[randIndex])  # remove the sample so it is not reused in this pass
    return weights

def loadDataSet():  # parse testSet.txt into feature vectors and labels
    dataMat = [];labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])  # prepend x0 = 1.0 for the intercept term
        labelMat.append(int(lineArr[2]))
    fr.close()
    return dataMat,labelMat

def sigmoid(intX):  # the sigmoid function
    return 1.0/(1+np.exp(-intX))

def gradAscent(dataMatIn,classLabels):  # batch gradient ascent; returns the weight of each feature
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()
    m,n = np.shape(dataMatrix)
    alpha = 0.01       # learning rate
    maxCycles = 500    # number of iterations
    weights = np.ones((n,1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix*weights)   # predictions for all samples at once
        error = labelMat - h              # error vector
        weights += alpha * dataMatrix.transpose() * error  # gradient ascent step on the log-likelihood
    return weights

def plotBestFit(weights):   # plot the data set and the decision boundary w0*x0 + w1*x1 + w2*x2 = 0
    dataMat,labelMat = loadDataSet()
    dataArr = np.array(dataMat)
    n = np.shape(dataArr)[0]
    xcord1 = [];ycord1 = []
    xcord2 = [];ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]);ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]);ycord2.append(dataArr[i,2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
    ax.scatter(xcord2,ycord2,s=30,c='green')
    x = np.arange(-3.0,3.0,0.1)
    y = (-weights[0] - weights[1]*x)/weights[2]
    ax.plot(x,y)
    plt.xlabel('X1');plt.ylabel('X2')
    plt.show()

def classifyVector(intX,weights):   # classify one sample: 1.0 if sigmoid(w.x) > 0.5, else 0.0
    weights = weights.reshape(-1,)  # flatten an (n,1) array to shape (n,)
    prob = sigmoid(sum(intX*weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0

def colicTest():    # train on horseColicTraining.txt, then report the error rate on horseColicTest.txt
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(len(currLine)-1):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[-1]))
    trainWeights = stocGradAscent1(np.array(trainingSet),trainingLabels,500)
    #trainWeights = gradAscent(np.array(trainingSet), trainingLabels)
    errorCount = 0;numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(len(currLine)-1):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(np.array(lineArr), trainWeights))!= int(currLine[-1]):
            errorCount += 1
    errorRate = (float(errorCount)/numTestVec)*100
    print("测试集错误率为: %.2f%%" % errorRate)

def colicSklearn(): # measure accuracy on the same data with sklearn's LogisticRegression
    frTrain = open('horseColicTraining.txt')  # open the training set
    frTest = open('horseColicTest.txt')  # open the test set
    trainingSet = [];trainingLabels = []
    testSet = [];testLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(len(currLine) - 1):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[-1]))
    for line in frTest.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(len(currLine) - 1):
            lineArr.append(float(currLine[i]))
        testSet.append(lineArr)
        testLabels.append(float(currLine[-1]))
    classifier = LogisticRegression(solver='liblinear', max_iter=20).fit(trainingSet, trainingLabels)
    test_accuracy = classifier.score(testSet, testLabels) * 100
    print('accuracy: %f%%' % test_accuracy)

if __name__ == '__main__':
    #colicTest()
    colicSklearn()
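
The 2-D demo helpers (loadDataSet, gradAscent, plotBestFit) are defined above but never called in __main__. A minimal sketch of how they fit together, assuming a testSet.txt with two features and a 0/1 label per line sits in the working directory:

dataMat, labelMat = loadDataSet()          # parse testSet.txt
weights = gradAscent(dataMat, labelMat)    # fit w0, w1, w2 by batch gradient ascent
plotBestFit(weights)                       # scatter both classes and draw w0 + w1*x1 + w2*x2 = 0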

Reference: https://blog.csdn.net/c406495762/article/details/77851973, which explains this example in detail.
