github 2021-01-03 12:28:58 +08:00
parent 62daef8fb0
commit 1192d42cf6
6 changed files with 315 additions and 0 deletions

100 梯度下降/data.txt Normal file

@@ -0,0 +1,100 @@
32.502345269453031,31.70700584656992
53.426804033275019,68.77759598163891
61.530358025636438,62.562382297945803
47.475639634786098,71.546632233567777
59.813207869512318,87.230925133687393
55.142188413943821,78.211518270799232
52.211796692214001,79.64197304980874
39.299566694317065,59.171489321869508
48.10504169176825,75.331242297063056
52.550014442733818,71.300879886850353
45.419730144973755,55.165677145959123
54.351634881228918,82.478846757497919
44.164049496773352,62.008923245725825
58.16847071685779,75.392870425994957
56.727208057096611,81.43619215887864
48.955888566093719,60.723602440673965
44.687196231480904,82.892503731453715
60.297326851333466,97.379896862166078
45.618643772955828,48.847153317355072
38.816817537445637,56.877213186268506
66.189816606752601,83.878564664602763
65.41605174513407,118.59121730252249
47.48120860786787,57.251819462268969
41.57564261748702,51.391744079832307
51.84518690563943,75.380651665312357
59.370822011089523,74.765564032151374
57.31000343834809,95.455052922574737
63.615561251453308,95.229366017555307
46.737619407976972,79.052406169565586
50.556760148547767,83.432071421323712
52.223996085553047,63.358790317497878
35.567830047746632,41.412885303700563
42.436476944055642,76.617341280074044
58.16454011019286,96.769566426108199
57.504447615341789,74.084130116602523
45.440530725319981,66.588144414228594
61.89622268029126,77.768482417793024
33.093831736163963,50.719588912312084
36.436009511386871,62.124570818071781
37.675654860850742,60.810246649902211
44.555608383275356,52.682983366387781
43.318282631865721,58.569824717692867
50.073145632289034,82.905981485070512
43.870612645218372,61.424709804339123
62.997480747553091,115.24415280079529
32.669043763467187,45.570588823376085
40.166899008703702,54.084054796223612
53.575077531673656,87.994452758110413
33.864214971778239,52.725494375900425
64.707138666121296,93.576118692658241
38.119824026822805,80.166275447370964
44.502538064645101,65.101711570560326
40.599538384552318,65.562301260400375
41.720676356341293,65.280886920822823
51.088634678336796,73.434641546324301
55.078095904923202,71.13972785861894
41.377726534895203,79.102829683549857
62.494697427269791,86.520538440347153
49.203887540826003,84.742697807826218
41.102685187349664,59.358850248624933
41.182016105169822,61.684037524833627
50.186389494880601,69.847604158249183
52.378446219236217,86.098291205774103
50.135485486286122,59.108839267699643
33.644706006191782,69.89968164362763
39.557901222906828,44.862490711164398
56.130388816875467,85.498067778840223
57.362052133238237,95.536686846467219
60.269214393997906,70.251934419771587
35.678093889410732,52.721734964774988
31.588116998132829,50.392670135079896
53.66093226167304,63.642398775657753
46.682228649471917,72.247251068662365
43.107820219102464,57.812512976181402
70.34607561504933,104.25710158543822
44.492855880854073,86.642020318822006
57.50453330326841,91.486778000110135
36.930076609191808,55.231660886212836
55.805733357942742,79.550436678507609
38.954769073377065,44.847124242467601
56.901214702247074,80.207523139682763
56.868900661384046,83.14274979204346
34.33312470421609,55.723489260543914
59.04974121466681,77.634182511677864
57.788223993230673,99.051414841748269
54.282328705967409,79.120646274680027
51.088719898979143,69.588897851118475
50.282836348230731,69.510503311494389
44.211741752090113,73.687564318317285
38.005488008060688,61.366904537240131
32.940479942618296,67.170655768995118
53.691639571070056,85.668203145001542
68.76573426962166,114.85387123391394
46.230966498310252,90.123572069967423
68.319360818255362,97.919821035242848
50.030174340312143,81.536990783015028
49.239765342753763,72.111832469615663
50.039575939875988,85.232007342325673
48.149858891028863,66.224957888054632
25.128484647772304,53.454394214850524


@@ -0,0 +1,59 @@
# https://blog.csdn.net/qq_37236745/article/details/103698507
import numpy as np

# Compute the mean squared error of the line y = w*x + b over all points.
def compute_error(b, w, points):
    totalError = 0
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        totalError += (y - (w * x + b)) ** 2
    return totalError / float(len(points))  # average

# Compute the averaged gradient and take one descent step.
def step_gradient(b_current, w_current, points, learningRate):
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        b_gradient += 2 * ((w_current * x) + b_current - y)
        w_gradient += 2 * x * ((w_current * x) + b_current - y)
    b_gradient = b_gradient / N
    w_gradient = w_gradient / N
    new_b = b_current - (learningRate * b_gradient)
    new_w = w_current - (learningRate * w_gradient)
    return [new_b, new_w]

def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):  # num_iterations: number of iterations
    b = starting_b
    w = starting_w
    error = np.zeros(num_iterations)
    for i in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), learning_rate)
        error[i] = compute_error(b, w, points)
    return [b, w], error

def run():
    points = np.genfromtxt("data.txt", delimiter=",")
    learning_rate = 0.000001
    initial_b = np.random.random()
    initial_w = np.random.random()
    num_iterations = 1000
    print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
          .format(initial_b, initial_w,
                  compute_error(initial_b, initial_w, points)))
    print("Running...")
    [b, w], error = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
    print("After {0} iterations: b = {1}, w = {2}, error = {3}"
          .format(num_iterations, b, w,
                  compute_error(b, w, points)))
    import matplotlib.pyplot as plt
    fig, bx = plt.subplots(figsize=(8, 6))
    bx.plot(np.arange(num_iterations), error, 'r')
    bx.set_xlabel('Iterations')
    bx.set_ylabel('Cost')
    bx.set_title('Error vs. Training Epoch')
    plt.show()

run()
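
As a sanity check on the loop above, the closed-form least-squares line for the same data can be computed with np.polyfit and compared against the (b, w) that gradient descent reaches. A minimal sketch, assuming data.txt is in the working directory; np.polyfit is standard NumPy, and the printed labels are illustrative only:

import numpy as np

points = np.genfromtxt("data.txt", delimiter=",")
x, y = points[:, 0], points[:, 1]

# Closed-form ordinary least squares: a degree-1 fit returns [slope, intercept].
slope, intercept = np.polyfit(x, y, 1)
print(f"OLS solution: w = {slope:.4f}, b = {intercept:.4f}")

# With learning_rate = 1e-6 and only 1000 iterations, the descent above usually
# gets close on the slope but not on the intercept: the b-gradient is roughly
# 50x smaller in magnitude than the w-gradient on this data (x is around 30-70),
# so b barely moves.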

19 梯度下降/test.py Normal file

@@ -0,0 +1,19 @@
import numpy as np
import pandas as pd

a = np.array([1, 2])
b = a.reshape(1, 2)
# b = np.mat(a)
c = np.array([[1, 2], [1, 3], [1, 4]])
# d = c.reshape(3, 2)
d = np.mat(c)
print(f"a is {a} {a.shape} shape {a.shape[0]}, b is {b} {b.shape} shape {b.shape[0], b.shape[1]}, "
      f"c is {c} {c.shape}, d is {d} {d.shape}")
# Note: e = np.mat([[2], [1], [3]]) would be wrong here.
# Matrix-times-matrix multiplication is completely different from elementwise array multiplication.
e = np.array([[2], [1], [3]])
print(f"c*e is {c*e}\n")
f = np.mat([[2], [1]])
print(f"c is {c}, and f is {f}\n"
      f"c*f is", c*f)


@@ -0,0 +1,73 @@
# https://www.cnblogs.com/judejie/p/8999832.html
# Scatter plot of years of experience vs. salary
# Import third-party modules
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# Load the dataset
# income = pd.read_csv(r'Salary_Data.csv')
# Draw the scatter plot
# sns.lmplot(x = 'YearsExperience', y = 'Salary', data = income, ci = None)
# Show the figure

class GD():
    def __init__(self, data, w, b, lr, nstep):
        self.data = data
        self.w = w
        self.b = b
        self.lr = lr
        self.nstep = nstep

    def loss(self, w, b):
        # Mean squared error of the line y = w*x + b over all samples.
        x, y = self.data[:, 0], self.data[:, 1]
        loss = 0
        for i in range(self.data.shape[0]):
            loss += (w * x[i] + b - y[i]) ** 2
        print(f"loss {loss}")
        return loss / float(self.data.shape[0])

    def grad(self, w, b, x, y):
        # Gradient of a single sample's squared error with respect to w and b.
        w_grad, b_grad = 2 * (w * x + b - y) * x, 2 * (w * x + b - y)
        return w_grad, b_grad

    def step_grad(self, w, b):
        x, y = self.data[:, 0], self.data[:, 1]
        w_gradient = 0
        b_gradient = 0
        for i in range(self.data.shape[0]):
            w_tmp, b_tmp = self.grad(w, b, x[i], y[i])
            w_gradient += w_tmp
            b_gradient += b_tmp
        w_new = w - w_gradient / self.data.shape[0] * self.lr
        b_new = b - b_gradient / self.data.shape[0] * self.lr
        return w_new, b_new

    def gradientDescent(self):
        # history = np.empty((self.nstep + 1, 2))
        error = np.zeros(self.nstep)
        w, b = self.w, self.b
        for i in range(self.nstep):
            w, b = self.step_grad(w, b)
            error[i] = self.loss(w, b)
        return w, b, error

w = 12
b = 12
income = pd.read_csv(r'Salary_Data.csv')
data = np.array(income.values)
nstep = 1000
lr = 0.001
gd = GD(data, w, b, lr, nstep)
w, b, error = gd.gradientDescent()
print("w,b is :", w, b)
x = data[:, 0]
y = data[:, 1]
plt.scatter(x, y)
# plt.legend()
plt.plot(x, w*x + b, 'r')
plt.show()
plt.plot(np.arange(nstep), error, 'r')
plt.show()
# Regression parameters: 11768.548124439758 10167.865862562581
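
The per-sample Python loops in loss and step_grad can be collapsed into vectorized NumPy expressions that compute the same averages in one pass. A minimal sketch of the update step only; step_grad_vectorized is a hypothetical name, not part of this commit, and it assumes data keeps its two-column (x, y) layout:

import numpy as np

def step_grad_vectorized(data, w, b, lr):
    # Same update rule as GD.step_grad above, written with array operations instead of a loop.
    x, y = data[:, 0], data[:, 1]
    residual = w * x + b - y                  # shape (N,)
    w_gradient = np.mean(2 * residual * x)    # average gradient w.r.t. w
    b_gradient = np.mean(2 * residual)        # average gradient w.r.t. b
    return w - lr * w_gradient, b - lr * b_gradient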


@@ -0,0 +1,25 @@
import numpy as np
import math
from matplotlib import pyplot as plt
import torch

def f(r):
    g = torch.tan(r) + 9 * torch.tan(3 / 4 * math.pi - r)
    return g

def drawfig(f):
    x = torch.linspace(1.1 / 4 * math.pi, 0.99 / 2 * math.pi, 100)
    y = f(x)
    plt.plot(x, y)
    plt.show()

r = torch.tensor([1.16 / 4 * np.pi], requires_grad=True)
optimizer = torch.optim.Adam([r], lr=1e-4)
for step in range(10000):
    pred = f(r)
    optimizer.zero_grad()
    pred.backward()
    optimizer.step()
    if step % 200 == 0:
        print(f'step {step}: r = {r.tolist()}, f(r) = {pred.item()}')
drawfig(f)
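
As a cross-check on the Adam loop, the same one-dimensional minimum can be bracketed directly with SciPy's bounded scalar minimiser. This is only an illustrative sketch and assumes SciPy is available; it is not imported anywhere in this commit:

import math
from scipy.optimize import minimize_scalar

def f(r):
    return math.tan(r) + 9 * math.tan(3 / 4 * math.pi - r)

# Search the same interval the plot above uses, (0.275*pi, 0.495*pi).
res = minimize_scalar(f, bounds=(1.1 / 4 * math.pi, 0.99 / 2 * math.pi), method='bounded')
print(f"r* = {res.x:.6f} rad, f(r*) = {res.fun:.6f}")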


@@ -0,0 +1,39 @@
import math
import numpy as np
import matplotlib.pyplot as plt

def f(r):
    g = np.tan(r) + 9 * np.tan(3 / 4 * np.pi - r)
    return g

def drawfig(f):
    x = np.linspace(1.1 / 4 * np.pi, 0.99 / 2 * np.pi, 100)
    y = f(x)
    # print(y)
    plt.plot(x, y)
    plt.show()

def grad(r):
    # Analytic derivative: d/dr tan(r) = 1/cos^2(r), and the chain rule gives
    # d/dr 9*tan(3*pi/4 - r) = -9/cos^2(3*pi/4 - r).
    g = 1.0 / (np.cos(r) ** 2) - 9.0 / (np.cos(3 / 4 * np.pi - r) ** 2)
    return g

def gradientDescent(f, r0, eta, nstep):
    r_history = np.zeros(nstep + 1)
    f_history = np.zeros(nstep + 1)
    r_history[0] = r0
    f_history[0] = f(r0)
    r = r0
    for i in range(1, nstep + 1):
        r = r - eta * grad(r)
        r_history[i] = r
        f_history[i] = f(r)
        print(f"r is :{r} f(r) is {f(r)}")
    return r_history, f_history

# print(f(1/3*np.pi))
drawfig(f)
x, y = gradientDescent(f, 1.12 / 4 * np.pi, 0.0001 * np.pi, 10000)
plt.plot(x, y)
plt.show()
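
The hand-derived grad above is easy to verify numerically with a central difference. A minimal sketch; the step h and the test points are arbitrary choices, not taken from the original script:

import numpy as np

def f(r):
    return np.tan(r) + 9 * np.tan(3 / 4 * np.pi - r)

def grad(r):
    return 1.0 / (np.cos(r) ** 2) - 9.0 / (np.cos(3 / 4 * np.pi - r) ** 2)

h = 1e-6
for r in (0.30 * np.pi, 0.35 * np.pi, 0.40 * np.pi):
    numeric = (f(r + h) - f(r - h)) / (2 * h)   # central-difference estimate
    print(f"r = {r:.4f}: analytic {grad(r):.6f} vs numeric {numeric:.6f}")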