Skip to content

Commit 42b94f3

Browse files
authored
Update resnet50.py
1 parent 3994559 commit 42b94f3

File tree

1 file changed

+131
-0
lines changed

1 file changed

+131
-0
lines changed

GPU-Virtual-Service/resnet50.py

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,132 @@
1+
import torch
2+
import torch.nn as nn
3+
import torchvision
4+
from torch.autograd import Variable
5+
import matplotlib.pyplot as plt
6+
import torch.nn.functional as F
7+
import torch.utils.data as Data
8+
import time
# ---- Hyperparameters and data preparation ----
# NOTE(review): the original hard-coded torch.device("cuda"), which crashes
# on machines without a GPU; fall back to CPU when CUDA is unavailable.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

Epoch = 100        # number of training epochs
Batch_Size = 128   # mini-batch size
LR = 0.01          # learning rate for Adam

# Training set (MNIST). download=False assumes ./data already contains the
# dataset files; set download=True for a first run.
trainData = torchvision.datasets.MNIST(
    root="./data",
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=False)

train_loader = Data.DataLoader(dataset=trainData, batch_size=Batch_Size, shuffle=True)
test_data = torchvision.datasets.MNIST(root="./data", train=False, download=False)

# First 5000 test images: shape (5000, 28, 28) -> (5000, 1, 28, 28),
# pixel values scaled from [0, 255] to [0, 1].
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:5000] / 255.
test_y = test_data.targets[:5000]
test_result = test_y        # CPU copy of the labels, kept for printing later
test_x = test_x.to(device)
test_y = test_y.to(device)
# Residual block: two 3x3 convolutions with an identity skip connection.
class ResidualBlock(nn.Module):
    """A basic residual unit that preserves channel count and spatial size.

    Path: conv(3x3) -> BatchNorm -> ReLU -> conv(3x3), then the input is
    added back (identity shortcut) and a final ReLU is applied.
    """

    def __init__(self, channel):
        super(ResidualBlock, self).__init__()
        self.channel = channel
        # First conv stage: 3x3 / stride 1 / padding 1 keeps H and W.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=channel,
                      out_channels=channel,
                      kernel_size=3,
                      stride=1,
                      padding=1),
            nn.BatchNorm2d(channel),
            nn.ReLU(inplace=True),
        )
        # Second conv stage; BatchNorm was deliberately left out here
        # (it is commented out in the original).
        self.conv2 = nn.Sequential(
            nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1),
        )

    def forward(self, x):
        shortcut = x
        y = self.conv2(self.conv1(x))
        return F.relu(y + shortcut)
# Residual network for 28x28 single-channel (MNIST) images.
class ResNet(nn.Module):
    """Small CNN with two residual blocks and a 10-way linear classifier.

    Per-sample shapes: (1,28,28) -> conv1 -> (32,12,12) -> reslayer1
    -> conv2 -> (16,4,4) -> reslayer2 -> flatten (256) -> fc -> (10,).
    """

    def __init__(self):
        super(ResNet, self).__init__()
        # (1,28,28) -> conv 5x5 -> (32,24,24) -> maxpool 2 -> (32,12,12)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # (32,12,12) -> conv 5x5 -> (16,8,8) -> maxpool 2 -> (16,4,4)
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=5),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.reslayer1 = ResidualBlock(32)
        self.reslayer2 = ResidualBlock(16)
        # Flattened features: 16 channels * 4 * 4 spatial = 256.
        self.fc = nn.Linear(256, 10)

    def forward(self, x):
        h = self.conv1(x)
        h = self.reslayer1(h)
        h = self.conv2(h)
        h = self.reslayer2(h)
        flat = h.view(h.size(0), -1)
        return self.fc(flat)
# Training routine.
def Train(Res):
    """Train *Res* on the module-level `train_loader` for `Epoch` epochs.

    Uses cross-entropy loss with an Adam optimizer (lr=LR), prints the
    running loss every 50 steps, then pickles the whole trained model to
    'res_minist.pkl'.
    """
    # Loss function and optimizer.
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.Adam(Res.parameters(), lr=LR)

    for epoch in range(Epoch):
        for step, (b_x, b_y) in enumerate(train_loader):
            b_x, b_y = b_x.to(device), b_y.to(device)

            loss = criterion(Res(b_x), b_y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 50 == 0:
                print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())

    torch.save(Res, 'res_minist.pkl')
    print('res finish training')
# Build the network and move it to the selected device.
x = torch.randn(16, 1, 28, 28)   # sample input tensor (not used below)
res = ResNet().to(device)
# Test: load the saved model and print predictions for 20 test digits.
def Restest():
    """Load the pickled model and compare predictions with the labels.

    Fixes over the original:
    - map_location=device so a GPU-saved checkpoint also loads on CPU;
    - weights_only=False restores the pre-torch-2.6 behavior of unpickling
      a whole nn.Module (NOTE: unpickling runs arbitrary code — only load
      files you trust);
    - model.eval() so BatchNorm uses running statistics at inference;
    - torch.no_grad() to skip gradient bookkeeping during inference.
    """
    res = torch.load('res_minist.pkl', map_location=device, weights_only=False)
    res.to(device)
    res.eval()
    with torch.no_grad():
        test_output = res(test_x[:20])
    test_output = test_output.cpu()
    prediction = torch.max(test_output, 1)[1].data.numpy()
    print(prediction, 'prediction number')
    print(test_result[:20].numpy(), 'real number')

    # Optional full-test-set accuracy check, kept from the original:
    # test_output1 = res(test_x)
    # pred_y1 = torch.max(test_output1, 1)[1].data.numpy()
    # accuracy = float((pred_y1 == test_y.cpu().data.numpy()).astype(int).sum()) / float(test_y.size(0))
    # print('accuracy', accuracy)
if __name__ == '__main__':
    # Train the freshly initialised network, then run the quick test.
    Train(res)
    Restest()

0 commit comments

Comments (0)