使用MXNet提供的Gluon接口实现线性回归
mxnet的nn(neural networks)模块定义了大量的神经网络的层。loss模块定义了各种损失函数
MXNet的initializer模块提供了模型参数初始化的各种方法
"""Linear regression implemented with MXNet's high-level Gluon API.

Generates a synthetic dataset from a known linear model
(y = 2*x1 - 3.4*x2 + 4.2 plus Gaussian noise), trains a single
Dense(1) layer with SGD on the L2 loss, and prints the learned
weight/bias next to the ground-truth values.
"""
from mxnet import autograd, nd
from mxnet import gluon
from mxnet import init
from mxnet.gluon import data as gdata
from mxnet.gluon import loss as gloss
from mxnet.gluon import nn as gnn

# --- Synthetic dataset -------------------------------------------------
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = nd.random.normal(scale=1, shape=(num_examples, num_inputs))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += nd.random.normal(scale=0.01, shape=labels.shape)  # observation noise

# --- Mini-batch iterator -----------------------------------------------
batch_size = 10
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)

# --- Model: one fully-connected layer with a single output -------------
# Gluon infers the input dimension on the first forward pass, so only the
# output size (1) needs to be specified.
net = gnn.Sequential()
net.add(gnn.Dense(1))
net.initialize(init.Normal(sigma=0.01))

loss = gloss.L2Loss()  # squared loss, averaged by Trainer.step below
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})

# --- Training loop -----------------------------------------------------
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for x, y in data_iter:
        with autograd.record():
            l = loss(net(x), y)
        l.backward()
        # step(batch_size) normalizes the accumulated gradient by the
        # batch size before the SGD update.
        trainer.step(batch_size)
    l = loss(net(features), labels)
    # float(...) makes the scalar conversion explicit; formatting a
    # 1-element ndarray with %f is deprecated in recent NumPy.
    print('epoch %d, loss: %f' % (epoch, float(l.mean().asnumpy())))

# --- Compare learned parameters with the ground truth ------------------
print('true_w:')
print(true_w)
print('train_w: ')
print(net[0].weight.data())
print('true_b:')
print(true_b)
print('train_b: ')
print(net[0].bias.data())
# Parameter.grad() is the documented Gluon accessor for the gradient
# buffer (the original `.data().grad` reached into the NDArray directly).
print(net[0].weight.grad())