Skip to content

Commit 921b69a

Browse files
committed
update to torch 0.4
1 parent 7e7c9bb commit 921b69a

15 files changed

Lines changed: 82 additions & 104 deletions

tutorial-contents/301_regression.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,10 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
"""
99
import torch
10-
from torch.autograd import Variable
1110
import torch.nn.functional as F
1211
import matplotlib.pyplot as plt
1312

@@ -17,8 +16,9 @@
1716
y = x.pow(2) + 0.2*torch.rand(x.size()) # noisy y data (tensor), shape=(100, 1)
1817

1918
# torch can only train on Variable, so convert them to Variable
20-
#x, y = Variable(x), Variable(y)
21-
#The above code is depricated. Now,autograd directly supports tensors
19+
# The code below is deprecated in PyTorch 0.4. Now, autograd directly supports tensors
20+
# x, y = Variable(x), Variable(y)
21+
2222
# plt.scatter(x.data.numpy(), y.data.numpy())
2323
# plt.show()
2424

@@ -56,7 +56,7 @@ def forward(self, x):
5656
plt.cla()
5757
plt.scatter(x.data.numpy(), y.data.numpy())
5858
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
59-
plt.text(0.5, 0, 'Loss=%.4f' % loss.data[0], fontdict={'size': 20, 'color': 'red'})
59+
plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
6060
plt.pause(0.1)
6161

6262
plt.ioff()

tutorial-contents/302_classification.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,10 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
"""
99
import torch
10-
from torch.autograd import Variable
1110
import torch.nn.functional as F
1211
import matplotlib.pyplot as plt
1312

@@ -22,8 +21,8 @@
2221
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
2322
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
2423

25-
# torch can only train on Variable, so convert them to Variable
26-
x, y = Variable(x), Variable(y)
24+
# The code below is deprecated in PyTorch 0.4. Now, autograd directly supports tensors
25+
# x, y = Variable(x), Variable(y)
2726

2827
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
2928
# plt.show()

tutorial-contents/304_save_reload.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,19 +3,20 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
"""
99
import torch
10-
from torch.autograd import Variable
1110
import matplotlib.pyplot as plt
1211

1312
# torch.manual_seed(1) # reproducible
1413

1514
# fake data
1615
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1) # x data (tensor), shape=(100, 1)
1716
y = x.pow(2) + 0.2*torch.rand(x.size()) # noisy y data (tensor), shape=(100, 1)
18-
x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)
17+
18+
# The code below is deprecated in PyTorch 0.4. Now, autograd directly supports tensors
19+
# x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)
1920

2021

2122
def save():

tutorial-contents/306_optimizer.py

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,12 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
"""
99
import torch
1010
import torch.utils.data as Data
1111
import torch.nn.functional as F
12-
from torch.autograd import Variable
1312
import matplotlib.pyplot as plt
1413

1514
# torch.manual_seed(1) # reproducible
@@ -27,7 +26,7 @@
2726
plt.show()
2827

2928
# put dataset into torch dataset
30-
torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
29+
torch_dataset = Data.TensorDataset(x, y)
3130
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)
3231

3332

@@ -64,17 +63,14 @@ def forward(self, x):
6463
# training
6564
for epoch in range(EPOCH):
6665
print('Epoch: ', epoch)
67-
for step, (batch_x, batch_y) in enumerate(loader): # for each training step
68-
b_x = Variable(batch_x)
69-
b_y = Variable(batch_y)
70-
66+
for step, (b_x, b_y) in enumerate(loader): # for each training step
7167
for net, opt, l_his in zip(nets, optimizers, losses_his):
7268
output = net(b_x) # get output for every net
7369
loss = loss_func(output, b_y) # compute loss for every net
7470
opt.zero_grad() # clear gradients for next train
7571
loss.backward() # backpropagation, compute gradients
7672
opt.step() # apply gradients
77-
l_his.append(loss.data[0]) # loss recoder
73+
l_his.append(loss.data.numpy()) # loss recoder
7874

7975
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
8076
for i, l_his in enumerate(losses_his):

tutorial-contents/401_CNN.py

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
torchvision
88
matplotlib
99
"""
@@ -14,7 +14,6 @@
1414
# third-party library
1515
import torch
1616
import torch.nn as nn
17-
from torch.autograd import Variable
1817
import torch.utils.data as Data
1918
import torchvision
2019
import matplotlib.pyplot as plt
@@ -51,9 +50,9 @@
5150
# Data Loader for easy mini-batch return in training, the image batch shape will be (50, 1, 28, 28)
5251
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
5352

54-
# convert test data into Variable, pick 2000 samples to speed up testing
53+
# pick 2000 samples to speed up testing
5554
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
56-
test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[:2000]/255. # shape from (2000, 28, 28) to (2000, 1, 28, 28), value in range(0,1)
55+
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000]/255. # shape from (2000, 28, 28) to (2000, 1, 28, 28), value in range(0,1)
5756
test_y = test_data.test_labels[:2000]
5857

5958

@@ -106,9 +105,7 @@ def plot_with_labels(lowDWeights, labels):
106105
plt.ion()
107106
# training and testing
108107
for epoch in range(EPOCH):
109-
for step, (x, y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
110-
b_x = Variable(x) # batch x
111-
b_y = Variable(y) # batch y
108+
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
112109

113110
output = cnn(b_x)[0] # cnn output
114111
loss = loss_func(output, b_y) # cross entropy loss
@@ -119,8 +116,8 @@ def plot_with_labels(lowDWeights, labels):
119116
if step % 50 == 0:
120117
test_output, last_layer = cnn(test_x)
121118
pred_y = torch.max(test_output, 1)[1].data.squeeze()
122-
accuracy = sum(pred_y == test_y) / float(test_y.size(0))
123-
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data[0], '| test accuracy: %.2f' % accuracy)
119+
accuracy = float(sum(pred_y == test_y)) / float(test_y.size(0))
120+
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
124121
if HAS_SK:
125122
# Visualization of trained flatten layer (T-SNE)
126123
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)

tutorial-contents/402_RNN_classifier.py

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,12 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
torchvision
99
"""
1010
import torch
1111
from torch import nn
12-
from torch.autograd import Variable
1312
import torchvision.datasets as dsets
1413
import torchvision.transforms as transforms
1514
import matplotlib.pyplot as plt
@@ -47,7 +46,7 @@
4746

4847
# convert test data into Variable, pick 2000 samples to speed up testing
4948
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
50-
test_x = Variable(test_data.test_data, volatile=True).type(torch.FloatTensor)[:2000]/255. # shape (2000, 28, 28) value in range(0,1)
49+
test_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255. # shape (2000, 28, 28) value in range(0,1)
5150
test_y = test_data.test_labels.numpy().squeeze()[:2000]    # convert to numpy array
5251

5352

@@ -84,9 +83,8 @@ def forward(self, x):
8483

8584
# training and testing
8685
for epoch in range(EPOCH):
87-
for step, (x, y) in enumerate(train_loader): # gives batch data
88-
b_x = Variable(x.view(-1, 28, 28)) # reshape x to (batch, time_step, input_size)
89-
b_y = Variable(y) # batch y
86+
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data
87+
b_x = b_x.view(-1, 28, 28) # reshape x to (batch, time_step, input_size)
9088

9189
output = rnn(b_x) # rnn output
9290
loss = loss_func(output, b_y) # cross entropy loss
@@ -97,8 +95,8 @@ def forward(self, x):
9795
if step % 50 == 0:
9896
test_output = rnn(test_x) # (samples, time_step, input_size)
9997
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
100-
accuracy = sum(pred_y == test_y) / float(test_y.size)
101-
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data[0], '| test accuracy: %.2f' % accuracy)
98+
accuracy = float(sum(pred_y == test_y)) / float(test_y.size)
99+
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
102100

103101
# print 10 predictions from test data
104102
test_output = rnn(test_x[:10].view(-1, 28, 28))

tutorial-contents/403_RNN_regressor.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,12 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
numpy
99
"""
1010
import torch
1111
from torch import nn
12-
from torch.autograd import Variable
1312
import numpy as np
1413
import matplotlib.pyplot as plt
1514

@@ -69,19 +68,19 @@ def forward(self, x, h_state):
6968
plt.figure(1, figsize=(12, 5))
7069
plt.ion() # continuously plot
7170

72-
for step in range(60):
71+
for step in range(100):
7372
start, end = step * np.pi, (step+1)*np.pi # time range
7473
# use sin predicts cos
7574
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
7675
x_np = np.sin(steps) # float32 for converting torch FloatTensor
7776
y_np = np.cos(steps)
7877

79-
x = Variable(torch.from_numpy(x_np[np.newaxis, :, np.newaxis])) # shape (batch, time_step, input_size)
80-
y = Variable(torch.from_numpy(y_np[np.newaxis, :, np.newaxis]))
78+
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis]) # shape (batch, time_step, input_size)
79+
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
8180

8281
prediction, h_state = rnn(x, h_state) # rnn output
8382
# !! next step is important !!
84-
h_state = Variable(h_state.data) # repack the hidden state, break the connection from last iteration
83+
h_state = h_state.data # repack the hidden state, break the connection from last iteration
8584

8685
loss = loss_func(prediction, y) # cross entropy loss
8786
optimizer.zero_grad() # clear gradients for this training step

tutorial-contents/404_autoencoder.py

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,12 @@
33
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
44
55
Dependencies:
6-
torch: 0.1.11
6+
torch: 0.4
77
matplotlib
88
numpy
99
"""
1010
import torch
1111
import torch.nn as nn
12-
from torch.autograd import Variable
1312
import torch.utils.data as Data
1413
import torchvision
1514
import matplotlib.pyplot as plt
@@ -87,15 +86,14 @@ def forward(self, x):
8786
plt.ion() # continuously plot
8887

8988
# original data (first row) for viewing
90-
view_data = Variable(train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.)
89+
view_data = train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.
9190
for i in range(N_TEST_IMG):
9291
a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray'); a[0][i].set_xticks(()); a[0][i].set_yticks(())
9392

9493
for epoch in range(EPOCH):
95-
for step, (x, y) in enumerate(train_loader):
96-
b_x = Variable(x.view(-1, 28*28)) # batch x, shape (batch, 28*28)
97-
b_y = Variable(x.view(-1, 28*28)) # batch y, shape (batch, 28*28)
98-
b_label = Variable(y) # batch label
94+
for step, (x, b_label) in enumerate(train_loader):
95+
b_x = x.view(-1, 28*28) # batch x, shape (batch, 28*28)
96+
b_y = x.view(-1, 28*28) # batch y, shape (batch, 28*28)
9997

10098
encoded, decoded = autoencoder(b_x)
10199

@@ -105,7 +103,7 @@ def forward(self, x):
105103
optimizer.step() # apply gradients
106104

107105
if step % 100 == 0:
108-
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data[0])
106+
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy())
109107

110108
# plotting decoded image (second row)
111109
_, decoded_data = autoencoder(view_data)
@@ -119,7 +117,7 @@ def forward(self, x):
119117
plt.show()
120118

121119
# visualize in 3D plot
122-
view_data = Variable(train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.)
120+
view_data = train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.
123121
encoded_data, _ = autoencoder(view_data)
124122
fig = plt.figure(2); ax = Axes3D(fig)
125123
X, Y, Z = encoded_data.data[:, 0].numpy(), encoded_data.data[:, 1].numpy(), encoded_data.data[:, 2].numpy()

tutorial-contents/405_DQN_Reinforcement_learning.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,12 @@
44
More about Reinforcement learning: https://morvanzhou.github.io/tutorials/machine-learning/reinforcement-learning/
55
66
Dependencies:
7-
torch: 0.3
7+
torch: 0.4
88
gym: 0.8.1
99
numpy
1010
"""
1111
import torch
1212
import torch.nn as nn
13-
from torch.autograd import Variable
1413
import torch.nn.functional as F
1514
import numpy as np
1615
import gym
@@ -55,7 +54,7 @@ def __init__(self):
5554
self.loss_func = nn.MSELoss()
5655

5756
def choose_action(self, x):
58-
x = Variable(torch.unsqueeze(torch.FloatTensor(x), 0))
57+
x = torch.unsqueeze(torch.FloatTensor(x), 0)
5958
# input only one sample
6059
if np.random.uniform() < EPSILON: # greedy
6160
actions_value = self.eval_net.forward(x)
@@ -82,10 +81,10 @@ def learn(self):
8281
# sample batch transitions
8382
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
8483
b_memory = self.memory[sample_index, :]
85-
b_s = Variable(torch.FloatTensor(b_memory[:, :N_STATES]))
86-
b_a = Variable(torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int)))
87-
b_r = Variable(torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2]))
88-
b_s_ = Variable(torch.FloatTensor(b_memory[:, -N_STATES:]))
84+
b_s = torch.FloatTensor(b_memory[:, :N_STATES])
85+
b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
86+
b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])
87+
b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])
8988

9089
# q_eval w.r.t the action in experience
9190
q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)

0 commit comments

Comments
 (0)