Gradient Descent

A minimal gradient-descent demo on the quadratic loss J(theta) = (theta - 2.5)**2 - 1, whose minimum sits at theta = 2.5.

import numpy as np
import matplotlib.pyplot as plt

# Sample the loss curve on [-1, 6] for plotting.
plot_x = np.linspace(-1, 6, 141)
plot_y = (plot_x - 2.5)**2 - 1

#plt.plot(plot_x, plot_y)
#plt.show()

# Derivative of the loss function
def dJ(theta):
    return 2 * (theta - 2.5)

# Loss function; (theta - 2.5)**2 raises OverflowError once theta
# grows too large (e.g. when eta is too big), so treat that as inf.
def J(theta):
    try:
        return (theta - 2.5)**2 - 1.
    except OverflowError:
        return float('inf')
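As a quick sanity check (my addition, not in the original post), the analytic derivative can be compared against a central finite difference; dJ_numeric is a hypothetical helper name:

# Sanity check (added): a central finite difference should match dJ.
def dJ_numeric(theta, h=1e-6):
    return (J(theta + h) - J(theta - h)) / (2 * h)

print(dJ(1.0), dJ_numeric(1.0))  # both approximately -3.0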

# First draft: a bare gradient-descent loop with no iteration cap,
# kept commented out for reference (it is wrapped in a function below).
#eta = 0.1
#epsilon = 1e-8
#theta = 0.0
#theta_history = [theta]
#while True:
#    gradient = dJ(theta)
#    last_theta = theta
#    theta = theta - eta * gradient
#    theta_history.append(theta)
#    if abs(J(theta) - J(last_theta)) < epsilon:
#        break
#print(theta)
#print(J(theta))

#plt.plot(plot_x, J(plot_x))
#plt.plot(np.array(theta_history), J(np.array(theta_history)), color='r', marker='+')
#plt.show()
#print(len(theta_history))


def gradient_descent(initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    # Appends every iterate to the global theta_history list;
    # the final theta is read back via theta_history[-1].
    theta = initial_theta
    theta_history.append(theta)
    i_iters = 0

    while i_iters < n_iters:
        gradient = dJ(theta)
        last_theta = theta
        theta = theta - eta * gradient
        theta_history.append(theta)
        # Stop once the loss barely changes between iterations.
        if abs(J(theta) - J(last_theta)) < epsilon:
            break
        i_iters += 1

def plot_theta_history():
    # Plot the loss curve and the path gradient descent took on it.
    plt.plot(plot_x, J(plot_x))
    plt.plot(np.array(theta_history), J(np.array(theta_history)), color='r', marker='+')
    plt.show()

eta = 0.01
theta_history = []
gradient_descent(0., eta)

plot_theta_history()

print(len(theta_history))  # number of iterates recorded
print(theta_history[-1])   # close to 2.5, the minimizer of J
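One more experiment worth running (my addition; eta = 1.1 and n_iters=10 are illustrative choices, not from the original post): with a learning rate this large every step overshoots the minimum and the iterates diverge, which is exactly why J guards against overflow and why gradient_descent caps the iteration count.

# Divergence demo (added): with eta > 1 theta moves further away
# from 2.5 on every step, so we cap the iterations at 10.
eta = 1.1
theta_history = []
gradient_descent(0., eta, n_iters=10)
plot_theta_history()  # the path fans outward instead of converging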