Logistic Regression

The script below implements logistic regression from scratch with NumPy: it loads a two-feature dataset, plots the two classes, and fits the parameters with an iterative gradient update.

import numpy as np
import matplotlib.pyplot as plt

# data1.txt: one sample per row, comma-separated -- two feature values and a 0/1 label
data1 = np.loadtxt("data1.txt", delimiter=",")
data2 = np.loadtxt("data2.txt", delimiter=",")  # loaded but not used below
x = np.c_[np.ones((data1.shape[0], 1)), data1[:, :2]]  # prepend an intercept column of ones
y = np.c_[data1[:, 2]]                                 # labels as an (m, 1) column vector

n_sample = x.shape[0]   # m: number of training samples
n_feature = x.shape[1]  # number of parameters (intercept + 2 features)
print(n_sample, n_feature)

positive = np.where(y[:, 0] == 1)[0]
negative = np.where(y[:, 0] == 0)[0]
plt.figure(num=1, figsize=(6, 4), edgecolor="b")
# columns 1 and 2 of x hold the two features (column 0 is the intercept)
plt.scatter(x[positive, 1], x[positive, 2], c="b", marker="x", s=12)
plt.scatter(x[negative, 1], x[negative, 2], c="r", marker="o", s=12)
plt.legend(["Passed", "Failed"])


######## Sigmoid function ########
def sigmoid(z):
    # sigmoid(z) = 1 / (1 + e^(-z)), mapping any real z into (0, 1)
    return 1.0 / (1.0 + np.exp(-z))


# Quick visual check of the sigmoid shape (uses a throwaway name,
# since reassigning x and y here would clobber the training data):
# plt.figure(num=2, figsize=(6, 4))
# z = np.linspace(-10, 10, 100)
# plt.plot(z, sigmoid(z))
# plt.show()
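
# A quick sanity check of the sigmoid on a few hand-verifiable points
# (illustrative values, not from the original post):
assert sigmoid(0) == 0.5       # e^0 = 1, so 1 / (1 + 1)
assert sigmoid(35) > 0.999     # saturates toward 1 for large positive z
assert sigmoid(-35) < 0.001    # saturates toward 0 for large negative z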



######## Cost function ########
def cost(x, y, theta):
    # cross-entropy cost: J(theta) = -(1/m) * sum(y*log(h) + (1-y)*log(1-h))
    h = sigmoid(x.dot(theta.reshape(-1, 1)))
    costvalue = (-1.0 / n_sample) * (np.log(h).T.dot(y) + np.log(1 - h).T.dot(1 - y))
    costvalue = costvalue.item()
    if np.isnan(costvalue):  # log(0) yields nan when h saturates at exactly 0 or 1
        return np.inf
    return costvalue
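
# A possible numerically stabler variant (a sketch, not in the original post):
# clipping h away from exactly 0 and 1 avoids log(0) entirely, so the
# nan-to-inf fallback above is never triggered. The eps value is an assumption.
def cost_clipped(x, y, theta, eps=1e-12):
    h = sigmoid(x.dot(theta.reshape(-1, 1)))
    h = np.clip(h, eps, 1 - eps)
    return (-(np.log(h).T.dot(y) + np.log(1 - h).T.dot(1 - y)) / n_sample).item()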

######## Gradient ########
def gradient(x, y, theta):
    # (1/m) * X^T (y - h): the ascent direction of the log-likelihood,
    # i.e. the negative of the gradient of the cost above
    theta = theta.reshape(-1, 1)
    temp = y - sigmoid(x.dot(theta))
    grad = (1.0 / n_sample) * x.T.dot(temp)
    # equivalent element-wise form:
    # grad = np.zeros(n_feature)
    # for j in range(n_feature):
    #     grad[j] = (1.0 / n_sample) * temp.T.dot(x[:, j])
    return grad.flatten()
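
# Optional check of the analytic gradient against central differences
# (a sketch; the eps value is an assumption). Note that gradient() returns
# the ascent direction (1/m) X^T (y - h), so it should match the *negative*
# of the numerical gradient of cost().
def numeric_grad(x, y, theta, eps=1e-5):
    g = np.zeros(theta.size)
    for j in range(theta.size):
        t_plus, t_minus = theta.copy(), theta.copy()
        t_plus[j] += eps
        t_minus[j] -= eps
        g[j] = (cost(x, y, t_plus) - cost(x, y, t_minus)) / (2 * eps)
    return g

# e.g.: np.allclose(numeric_grad(x, y, np.zeros(n_feature)), -gradient(x, y, np.zeros(n_feature)))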


######## Theta update ########
def theta_update(theta, gradient, sigma):
    # adding sigma * gradient ascends the log-likelihood,
    # which is the same as descending the cost
    return theta + sigma * gradient


######## Stop strategy ########
def stop_strategy(cost, cost_update, threshold):
    # stop once the decrease in cost falls below the threshold
    # (the cost shrinks each step, so compare cost - cost_update)
    return cost - cost_update < threshold


def predict(x, theta, threshold):
    h = sigmoid(x.dot(theta.reshape(-1, 1))) >= threshold
    return h.astype("int")

def logistic(x, y, sigma, threshold):
    theta = np.zeros(n_feature)
    counter = 0
    while True:
        J = cost(x, y, theta)
        grad = gradient(x, y, theta)
        theta_new = theta_update(theta, grad, sigma)
        J_update = cost(x, y, theta_new)
        print("iteration:", counter, "J:", J, "J_update:", J_update)
        if stop_strategy(J, J_update, threshold):
            break

        theta = theta_new
        counter += 1

    return theta, counter

theta, counter = logistic(x, y, 0.001, 0.001)
print("theta:", theta)

# probability and class prediction for a new sample with feature values (80, 60)
print(sigmoid(np.array([1, 80, 60]).dot(theta.reshape(-1, 1))))
a = predict(np.array([1, 80, 60]), theta, 0.5)
print("Predict:", a)

# theta = np.zeros(n_feature)
# a = cost(x,y,theta)
# b = gradient(x,y,theta)
# print(a)
# print(b)
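
# For comparison, an off-the-shelf optimizer converges much faster than the
# fixed-step loop above. A sketch using scipy.optimize (SciPy assumed
# available; note the sign flip, since gradient() returns the ascent direction):
# from scipy.optimize import minimize
# res = minimize(lambda t: cost(x, y, t), np.zeros(n_feature),
#                jac=lambda t: -gradient(x, y, t), method="TNC")
# print("scipy theta:", res.x)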

Reposted from blog.csdn.net/u010016056/article/details/80708526