Machine Learning - Gradient Descent and Logistic Regression

Logistic Regression
Goal: use logistic regression to predict whether a student will be admitted to university.
Data: historical records of previous applicants, used to build the logistic regression prediction model.

We will build a classification model that describes the probability of being admitted.
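For reference, the model built below is the standard logistic regression hypothesis: the admission probability is the sigmoid of a linear combination of the features,

h_\theta(x) = g(\theta^{T}x) = \frac{1}{1 + e^{-\theta^{T}x}}

and an applicant is classified as admitted when h_\theta(x) \ge 0.5.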

# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# IPython magic function: with %matplotlib inline, figures are rendered inline in the notebook, so plt.show() can be omitted
%matplotlib inline      
import os
path = "data"+os.sep+"LogiReg_data.txt"
pdData = pd.read_csv(path, header=None, names=['Exam1','Exam2','Admitted'])
pdData.head()

pdData.shape

positive = pdData[pdData['Admitted']==1]  # rows where Admitted == 1: the set of positive examples
negative = pdData[pdData['Admitted']==0]  # rows where Admitted == 0: the set of negative examples
fig,ax = plt.subplots(figsize=(10,5))
ax.scatter(positive['Exam1'],positive['Exam2'],s=30,c='b',marker='o',label='Admitted')
ax.scatter(negative['Exam1'],negative['Exam2'],s=30,c='r',marker='x',label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam1 Score')
ax.set_ylabel('Exam2 Score')


def sigmoid(z):
    return 1/(1 + np.exp(-z))
nums = np.arange(-10,10,step=1)  # 20 integer values from -10 to 9 (arange excludes the upper endpoint)
fig,ax = plt.subplots(figsize=(12,4))
ax.plot(nums,sigmoid(nums),'r')
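The sigmoid g(z) squashes any real input into the interval (0, 1), which is what lets the model output be read as a probability:

g(z) = \frac{1}{1+e^{-z}}, \qquad g(0) = \tfrac{1}{2}, \qquad \lim_{z\to-\infty} g(z) = 0, \qquad \lim_{z\to+\infty} g(z) = 1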

def model(X,theta):
    return sigmoid(np.dot(X,theta.T))  # np.dot(): matrix multiplication of X with theta transposed


pdData.insert(0,'Ones',1)  # add a column of ones for the intercept term; wrap in try/except if this cell may be run more than once (insert raises an error when the column already exists)
# set X (training data) and y (target variable)
orig_data = pdData.values  # convert the DataFrame to a NumPy array for the computations below (as_matrix() is deprecated in newer pandas)
cols = orig_data.shape[1]  # shape[1] is the number of columns (features + label)
x = orig_data[:,0:cols-1]
y = orig_data[:,cols-1:cols]
#convert to numpy arrays and initialize the parameter array theta
# X = np.matrix(X.values)
# Y = np.matrix(data.iloc[:,3:4].values)#np.array(y.values)
theta = np.zeros([1,3])
x[:5]  # preview the first five rows of the design matrix

theta

x.shape,y.shape,theta.shape

def cost(x,y,theta):
    # average negative log-likelihood (cross-entropy) over the training samples
    left = np.multiply(-y,np.log(model(x,theta)))
    right = np.multiply(1-y,np.log(1-model(x,theta)))
    return np.sum(left-right)/(len(x))
cost(x,y,theta)
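The function above computes the usual logistic regression cost, the average negative log-likelihood over the m training samples (left is the first term, right the second):

J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\Big[-y^{(i)}\log h_\theta(x^{(i)}) - \big(1-y^{(i)}\big)\log\big(1-h_\theta(x^{(i)})\big)\Big]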

def gradient(x,y,theta):
    grad = np.zeros(theta.shape)
    error = (model(x,theta)-y).ravel()  # ravel(): flatten the (m,1) prediction error to a 1-D array
    for j in range(len(theta.ravel())):# for each parameter
        term = np.multiply(error,x[:,j])
        grad[0,j] = np.sum(term)/len(x)
    return grad
gradient(x,y,theta)
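Each entry of the returned grad is the partial derivative of the cost with respect to one parameter, computed over whatever slice of the data is passed in:

\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)}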

STOP_ITER = 0
STOP_COST = 1
STOP_GRAD = 2
def stopCriterion(type,value,threshold):
    # three different stopping strategies
    if type == STOP_ITER:        # stop after a fixed number of iterations
        return value > threshold
    elif type == STOP_COST:      # stop when the cost barely changes between iterations
        return abs(value[-1]-value[-2]) < threshold
    elif type == STOP_GRAD:      # stop when the gradient norm becomes small
        return np.linalg.norm(value) < threshold
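A quick illustration of when each strategy fires; the numbers below are made up purely for the example:

stopCriterion(STOP_ITER, 5001, 5000)                               # True: the iteration count has passed the threshold
stopCriterion(STOP_COST, [0.6300000, 0.6299999], 1e-6)             # True: the last change in cost is below the threshold
stopCriterion(STOP_GRAD, np.array([[0.001, 0.001, 0.001]]), 0.05)  # True: the gradient norm is below the threshold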
import numpy.random
# shuffle the data
def shuffleData(data):
    np.random.shuffle(data)
    cols = data.shape[1]
    x = data[:,0:cols-1]
    y = data[:,cols-1:]
    return x, y
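Note that np.random.shuffle permutes its argument in place, so shuffleData also reorders the array that was passed in. A minimal usage sketch, assuming orig_data has already been built:

x, y = shuffleData(orig_data.copy())  # pass a copy if orig_data should keep its original row order
x[:5], y[:5]                          # first five rows after shuffling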
import time
def descent(data,theta,batchSize,stopType,thresh,alpha):
    # gradient descent solver
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # index of the current mini-batch within the data
    x,y = shuffleData(data)
    grad = np.zeros(theta.shape)  # the computed gradient
    costs = [cost(x,y,theta)]  # history of cost values

    while True:
        grad = gradient(x[k:k+batchSize],y[k:k+batchSize],theta)
        k += batchSize  # advance by batchSize samples
        if k >= n:  # n is the total number of samples (set globally below)
            k = 0
            x, y = shuffleData(data)  # reshuffle after a full pass over the data
        theta = theta - alpha*grad  # parameter update
        costs.append(cost(x,y,theta))  # record the new cost
        i += 1
        
        if stopType == STOP_ITER:
            value = i
        elif stopType == STOP_COST:
            value = costs
        elif stopType == STOP_GRAD:
            value = grad
        if stopCriterion(stopType,value,thresh):
            break
    return theta, i-1, costs, grad, time.time()-init_time 
def runExpe(data,theta,batchSize,stopType,thresh,alpha):
    # import pdb; pdb.set_trace()  # uncomment for step-by-step debugging
    theta,iter,costs,grad,dur = descent(data,theta,batchSize,stopType,thresh,alpha)
    name = "Original" if(data[:,1]>2).sum() > 1 else "Scaled"
    name += " data - learning rate: {} -".format(alpha)
    if batchSize == n:
        strDescType = "Gradient"
    elif batchSize == 1:
        strDescType = "Stochastic"
    else:
        strDescType ="Mini-batch ({})".format(batchSize)
    name += strDescType + " descent - Stop"
    if stopType == STOP_ITER:
        strStop = "{} iterations".format(thresh)
    elif stopType == STOP_COST:
        strStop = "costs change < {}".format(thresh)
    else:
        strStop = "gradient norm < {}".format(thresh)
    name += strStop
    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(name,theta,iter,costs[-1],dur))
    fig, ax = plt.subplots(figsize=(12,4))
    ax.plot(np.arange(len(costs)),costs,'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper()+' - Error vs. Iteration')
    return theta

# The gradient descent variant below is full-batch: every update uses all n samples
n = 100  # total number of training samples
runExpe(orig_data,theta,n,STOP_ITER,thresh=5000,alpha=0.000001)  # full batch, stop after 5000 iterations


runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001)  # full batch, stop when the gradient norm drops below 0.05


runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001)  # stochastic: one sample per update

runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002)  # stochastic with a much smaller learning rate


runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001)  # mini-batch of 16 samples per update


from sklearn import preprocessing as pp
scaled_data = orig_data.copy()
scaled_data[:,1:3] = pp.scale(orig_data[:,1:3])  # standardize the two exam-score columns (zero mean, unit variance)
runExpe(scaled_data, theta, n, STOP_ITER, thresh=5000, alpha=0.001)  # same settings as before, but on standardized features

runExpe(scaled_data, theta, n, STOP_GRAD, thresh=0.02, alpha=0.001)

theta = runExpe(scaled_data, theta, 1, STOP_GRAD, thresh=0.002/5, alpha=0.001)

runExpe(scaled_data, theta, 16, STOP_GRAD, thresh=0.002*2, alpha=0.001)
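pp.scale standardizes each selected column to zero mean and unit variance:

x'_{j} = \frac{x_{j} - \mu_{j}}{\sigma_{j}}

where \mu_j and \sigma_j are the column mean and standard deviation. With the two exam scores on comparable scales, gradient descent can use a larger learning rate and converges in far fewer iterations, which is why the experiments are rerun on scaled_data here.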

# set the decision threshold
def predict(x, theta):
    return [1 if p >= 0.5 else 0 for p in model(x,theta)]
scaled_x = scaled_data[:,:3]
y = scaled_data[:,3]
predictions = predict(scaled_x, theta)
correct = [1 if((a==1 and b==1) or (a==0 and b==0)) else 0 for (a,b) in zip(predictions, y)]
accuracy = (sum(map(int, correct)) * 100 // len(correct))
print("accuracy={0}%".format(accuracy))








Reposted from blog.csdn.net/poyue8754/article/details/81065469