import numpy as np

# 1. Sample the data
data = []  # list holding the sample set
for i in range(100):  # sample 100 points in a loop
    x = np.random.uniform(-10., 10.)  # randomly sample the input x
    # sample Gaussian noise
    eps = np.random.normal(0., 0.1)
    # compute the model output
    y = 1.477 * x + 0.089 + eps
    data.append([x, y])  # save the sample point
data = np.array(data)  # convert to a 2-D array
print(data)

# 2. Compute the loss
def mse(b, w, points):
    # compute the mean squared error loss for the current w, b
    totalError = 0
    for i in range(0, len(points)):  # iterate over all points
        x = points[i, 0]  # input x of point i
        y = points[i, 1]  # output y of point i
        totalError += (y - (w * x + b)) ** 2
    return totalError / float(len(points))  # average to get the MSE

# 3. Compute the gradients
def step_gradient(b_current, w_current, points, lr):
    # compute the derivatives of the loss over all points and update w, b
    b_gradient = 0
    w_gradient = 0
    M = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        b_gradient += (2 / M) * ((w_current * x + b_current) - y)
        w_gradient += (2 / M) * x * ((w_current * x + b_current) - y)
    # update w, b by gradient descent; lr is the learning rate
    new_b = b_current - (lr * b_gradient)
    new_w = w_current - (lr * w_gradient)
    return [new_b, new_w]

# Gradient update loop
def gradient_descent(points, starting_b, starting_w, lr, num_iterations):
    b = starting_b
    w = starting_w
    for step in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), lr)
        loss = mse(b, w, points)
        if step % 50 == 0:  # report progress every 50 iterations
            print("iteration:{}, loss:{}, w:{}, b:{}".format(step, loss, w, b))
    return [b, w]

def main():
    lr = 0.01  # learning rate
    initial_b = 0  # initial bias
    initial_w = 0  # initial weight
    num_iterations = 1000
    [b, w] = gradient_descent(data, initial_b, initial_w, lr, num_iterations)
    loss = mse(b, w, data)
    print("final loss:{}, w:{}, b:{}".format(loss, w, b))

if __name__ == '__main__':
    main()
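
For reference, the hand-coded gradients in step_gradient are the analytic derivatives of the mean squared error computed by mse. A minimal sketch of the math the listing implements (the symbol $\eta$ below stands for the learning rate lr; the notation is introduced here, not taken from the listing):

\[
\mathcal{L}(w,b) = \frac{1}{M}\sum_{i=1}^{M}\bigl(y_i - (w x_i + b)\bigr)^2
\]
\[
\frac{\partial \mathcal{L}}{\partial b} = \frac{2}{M}\sum_{i=1}^{M}\bigl((w x_i + b) - y_i\bigr),
\qquad
\frac{\partial \mathcal{L}}{\partial w} = \frac{2}{M}\sum_{i=1}^{M} x_i\bigl((w x_i + b) - y_i\bigr)
\]

Each call to step_gradient accumulates these sums over all points and applies one update $b \leftarrow b - \eta\,\partial\mathcal{L}/\partial b$ and $w \leftarrow w - \eta\,\partial\mathcal{L}/\partial w$.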