A General Framework for Building a Simple Model
import torch
from torch.utils import data
from torch import nn

# wrap the tensors into a dataset and a shuffled mini-batch iterator
dataset = data.TensorDataset(features, labels)
data_iter = data.DataLoader(dataset, batch_size, shuffle=True)

# define the model (layer sizes x, y are placeholders)
model = nn.Sequential(nn.Linear(x, y), ...)

# pick a loss function and an optimizer (names are placeholders)
loss = nn.loss_fun_name()
trainer = torch.optim.optimizer_name(model.parameters(), lr=learning_rate)

for epoch in range(epoch_num):
    for X, y in data_iter:
        l = loss(model(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    print(loss(model(features), labels))
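To make the template concrete, here is a minimal sketch that fills in the placeholders with a small multilayer perceptron, nn.CrossEntropyLoss, and torch.optim.Adam. The dimensions, hyperparameters, and random data below are illustrative assumptions, not part of the framework itself:

import torch
from torch.utils import data
from torch import nn

# illustrative random classification data: 100 samples, 4 features, 3 classes
features = torch.randn(100, 4)
labels = torch.randint(0, 3, (100,))

dataset = data.TensorDataset(features, labels)
data_iter = data.DataLoader(dataset, batch_size=10, shuffle=True)

# fill in the placeholders: a small MLP, cross-entropy loss, Adam optimizer
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
loss = nn.CrossEntropyLoss()
trainer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(5):
    for X, y in data_iter:
        l = loss(model(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    print(loss(model(features), labels))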
Implementing Linear Regression
Example features (2-D, one row per sample):

feature1 (numeric)    feature2 (numeric)
f11                   f21
f12                   f22
f13                   f23
…                     …
Example labels (one label per feature row, in the same order as the table above).
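As an illustration of this layout (the numbers below are made up purely for this sketch):

import torch
from torch.utils import data

# two numeric features per sample (rows match the table above)
features = torch.tensor([[1.4, -0.2],
                         [0.3,  1.1],
                         [-2.0, 0.5]])

# one label per feature row, in the same order
labels = torch.tensor([[5.8], [0.9], [-1.5]])

# paired so the iterator yields matching (features, label) batches
dataset = data.TensorDataset(features, labels)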
The complete training script:

import torch
from torch.utils import data
from torch import nn
from d2l import torch as d2l

# generate 1000 synthetic samples: y = Xw + b + noise
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)

# wrap the tensors into a dataset and a shuffled mini-batch iterator
dataset = data.TensorDataset(features, labels)
batch_size = 10
data_iter = data.DataLoader(dataset, batch_size, shuffle=True)

# a single linear layer: 2 input features, 1 output
net = nn.Sequential(nn.Linear(2, 1))

# initialize the parameters
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)

# squared-error loss and minibatch SGD
loss = nn.MSELoss()
trainer = torch.optim.SGD(net.parameters(), lr=0.03)

num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    l = loss(net(features), labels)
    print("epoch: {:d}, loss: {:f}".format(epoch + 1, l))
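Since the data was generated from the known true_w and true_b, one way to sanity-check the result (a sketch, not part of the script above) is to compare the learned parameters against them:

# the learned weight and bias live in the first (only) layer
w = net[0].weight.data
b = net[0].bias.data
print("error in estimating w:", true_w - w.reshape(true_w.shape))
print("error in estimating b:", true_b - b)

Both errors should be close to zero after the three epochs of training.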