from mxnet import autograd, nd
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = nd.random.normal(scale=1, shape=(num_examples, num_inputs))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
# print(labels.shape)  # (1000,)
labels += nd.random.normal(scale=0.01, shape=labels.shape)
print(labels)
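# Sanity check (a hedged sketch, not part of the original pipeline): subtracting
# the noiseless linear function from the noisy labels should leave residuals
# whose standard deviation is close to the 0.01 noise scale used above.
residual = labels - (true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b)
print(residual.asnumpy().std())  # expect a value near 0.01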
from mxnet.gluon import data as gdata
batch_size = 10
# Combine the training features and labels into a dataset
dataset = gdata.ArrayDataset(features, labels)
# Read random mini-batches from the dataset
data_iter = gdata.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
for X, y in data_iter:
    print(X, y)
    break
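# Each batch yielded above has X of shape (batch_size, 2) and y of shape
# (batch_size,). The DataLoader also reports how many batches one pass over
# the dataset takes (1000 examples / batch size of 10 = 100 batches).
print(len(data_iter))  # 100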
# Define the model
from mxnet.gluon import nn
net = nn.Sequential()
net.add(nn.Dense(1))
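# Sequential chains layers in order; Dense(1) is a fully connected layer with
# a single output. Printing the network shows that the input dimension is left
# unspecified, to be inferred from the data on the first forward pass.
print(net)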
# Initialize the model parameters
from mxnet import init
net.initialize(init.Normal(sigma=0.01))
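# Initialization is deferred until the weight shape is known. A single forward
# pass (a minimal sketch, not part of the training loop below) triggers it,
# after which the inferred weight shape (1, 2) can be inspected.
net(features[:1])
print(net[0].weight.data().shape)  # (1, 2)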
# Define the loss function
from mxnet.gluon import loss as gloss
loss = gloss.L2Loss()  # squared loss
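# L2Loss computes the squared loss with a 1/2 factor, l(y_hat, y) = (y_hat - y)**2 / 2,
# per example. A minimal hand-checked example:
print(loss(nd.array([2.0]), nd.array([1.0])))  # 0.5 * (2 - 1)**2 = [0.5]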
# Optimization algorithm: mini-batch stochastic gradient descent ('sgd'), set up via a Trainer instance.
# The Trainer iterates over the parameters of every layer added to net via add();
# these parameters are retrieved with the collect_params function.
from mxnet import gluon
trainer = gluon.Trainer(net.collect_params(),'sgd',{'learning_rate':0.03})
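# collect_params returns the ParameterDict the Trainer will update. Note that
# step(batch_size), called in the loop below, divides the accumulated gradient
# by batch_size, so each update uses the average gradient over the mini-batch.
print(net.collect_params())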
# Train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        with autograd.record():
            l = loss(net(X), y)
        l.backward()
        trainer.step(batch_size)
    l = loss(net(features), labels)
    print('epoch %d, loss: %f' % (epoch, l.mean().asscalar()))
# Access the learned model parameters, the weight and the bias, from net
dense = net[0]
print(true_w, dense.weight.data())
print(true_b, dense.bias.data())
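# A quick comparison sketch: after training, the gaps between the learned and
# the true parameters should be small (roughly on the scale of the label noise).
w = dense.weight.data()
print('error in w:', nd.array(true_w).reshape(w.shape) - w)
print('error in b:', true_b - dense.bias.data())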