distance_travelled + b

# Training data: six (x, y) pairs that follow y = 2*x + 10 exactly,
# so the fitted parameters should converge toward w=2, b=10.
# NOTE(review): the "x_data =" target was lost in extraction — reconstructed
# from the parallel y_data assignment; confirm against the original tutorial.
x_data = paddle.to_tensor([[1.], [3.0], [5.0], [9.0], [10.0], [20.0]])
y_data = paddle.to_tensor([[12.], [16.0], [20.0], [28.0], [30.0], [50.0]])

# Define the model: a single linear layer computing y = w*x + b.
linear = paddle.nn.Linear(in_features=1, out_features=1)

# Record the randomly initialized parameters before training for comparison.
w_before_opt = linear.weight.numpy().item()
b_before_opt = linear.bias.numpy().item()
print("w before optimize: {}".format(w_before_opt))
print("b before optimize: {}".format(b_before_opt))

# How to learn: mean-squared-error loss, minimized by plain SGD.
mse_loss = paddle.nn.MSELoss()
# NOTE(review): the SGD constructor call was dropped by the extraction; this
# line is reconstructed from the official Paddle quickstart — confirm the
# learning rate against the original source.
sgd_optimizer = paddle.optimizer.SGD(
    learning_rate=0.001, parameters=linear.parameters()
)

# Run the optimizer (learning loop): forward, loss, backward, step, clear.
total_epoch = 5000
for i in range(total_epoch):
    y_predict = linear(x_data)
    loss = mse_loss(y_predict, y_data)
    loss.backward()
    sgd_optimizer.step()
    sgd_optimizer.clear_grad()
    if i % 1000 == 0:
        print("epoch {} loss {}".format(i, loss.numpy()))

print("finished training， loss {}".format(loss.numpy()))

# Result: the learned parameters after optimization.
w_after_opt = linear.weight.numpy().item()
b_after_opt = linear.bias.numpy().item()
print("w after optimize: {}".format(w_after_opt))
print("b after optimize: {}".format(b_after_opt))

import imp
w before optimize: 1.2779237031936646
b before optimize: 0.0
epoch 0 loss [269.062]
epoch 1000 loss [8.225423]
epoch 2000 loss [1.8391448]
epoch 3000 loss [0.4112188]
epoch 4000 loss [0.09194404]
finished training， loss [0.02058855]
w after optimize: 2.0182714462280273
b after optimize: 9.76637077331543

GitHub

Gitee