Skip to content

Commit 0d38a39

Browse files
committed
add chapter 2
1 parent 04d93cd commit 0d38a39

File tree

18 files changed

+1791
-3
lines changed

18 files changed

+1791
-3
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@ coverage.xml
6868
.hypothesis/
6969
.pytest_cache/
7070
cover/
71+
pyTorch/BookPyTorch2/chapter2/img/*.jpg
7172

7273
# Translations
7374
*.mo
-3.56 KB
Loading
232 Bytes
Loading

pyTorch/BookPyTorch2/chapter2/train.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,8 @@
3131
x_train /= 512.
3232
train_length = len(x_train) * 20 #增加数据的单词循环次数
3333

34-
state_dict = torch.load("./saver/unet.pth")
35-
model.load_state_dict(state_dict)
34+
# state_dict = torch.load("./saver/unet.pth")
35+
# model.load_state_dict(state_dict)
3636
for epoch in range(epochs):
3737
train_num = train_length // batch_size #计算有多少批次数
3838

@@ -45,7 +45,7 @@
4545
for b in range(batch_size):
4646
img = x_train[np.random.randint(x_train.shape[0])] #提取单个图片内容
4747
x = img
48-
y = img
48+
y = img # todo ?? y 应该是 label,这里直接用 img?
4949

5050
x_imgs_batch.append(x)
5151
y_batch.append(y)
@@ -55,6 +55,7 @@
5555
y_batch = torch.tensor(y_batch).float().to(device)
5656

5757

58+
# todo model() 返回的不是 label,而是新的 img
5859
pred = model(x_imgs_batch) #对模型进行正向计算
5960
loss = torch.nn.MSELoss(reduction="sum")(pred, y_batch)*100. #使用损失函数进行计算
6061

-733 KB
Binary file not shown.
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
# Minimal demo of torch.nn.CrossEntropyLoss on a single three-class example.
import torch

# Ground-truth class index: the sample belongs to class 0.
y = torch.tensor([0], dtype=torch.long)
# Raw (unnormalized) scores for one sample over three classes.
z = torch.tensor([[0.2, 0.1, -0.1]])
# CrossEntropyLoss combines log-softmax with negative log-likelihood.
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(z, y)
print(loss)
Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,153 @@
1+
"""Various network parameter initializers."""
2+
3+
import numpy as np
4+
5+
6+
def get_fans(shape):
    """Derive ``(fan_in, fan_out)`` from a weight-tensor shape.

    A 2-D (dense) weight maps the two axes directly to the fans; for a
    higher-rank weight (e.g. convolution kernels) fan_in is the product
    of all trailing axes and fan_out is the leading axis.
    """
    if len(shape) == 2:
        return shape[0], shape[1]
    return np.prod(shape[1:]), shape[0]
10+
11+
12+
class Initializer:
    """Base class for weight initializers.

    Subclasses implement :meth:`init` to produce an array for a given
    shape; calling the instance normalizes the result to float32.
    """

    def __call__(self, shape):
        # Delegate sampling to the subclass, then standardize the dtype.
        weights = self.init(shape)
        return weights.astype(np.float32)

    def init(self, shape):
        # Concrete subclasses must supply the actual sampling strategy.
        raise NotImplementedError
19+
20+
21+
class Normal(Initializer):
    """Draw weights from a Gaussian N(mean, std**2)."""

    def __init__(self, mean=0.0, std=1.0):
        self._loc, self._scale = mean, std

    def init(self, shape):
        return np.random.normal(loc=self._loc, scale=self._scale, size=shape)
29+
30+
31+
class TruncatedNormal(Initializer):
    """Gaussian sampler restricted to the open interval (low, high).

    Entries falling outside the interval are redrawn (rejection
    sampling) until every element lies strictly inside it.
    """

    def __init__(self, low, high, mean=0.0, std=1.0):
        self._mean, self._std = mean, std
        self._low, self._high = low, high

    def init(self, shape):
        samples = np.random.normal(loc=self._mean, scale=self._std, size=shape)
        while True:
            # Values on or outside the bounds are rejected.
            rejected = (samples <= self._low) | (samples >= self._high)
            if not rejected.any():
                return samples
            # Redraw only the rejected entries, leaving accepted ones intact.
            samples[rejected] = np.random.normal(
                loc=self._mean, scale=self._std, size=rejected.sum())
46+
47+
48+
class Uniform(Initializer):
    """Draw weights uniformly from the half-open interval [a, b)."""

    def __init__(self, a=0.0, b=1.0):
        self._low, self._high = a, b

    def init(self, shape):
        return np.random.uniform(low=self._low, high=self._high, size=shape)
56+
57+
58+
class Constant(Initializer):
    """Fill every weight with one fixed value."""

    def __init__(self, val):
        self._fill = val

    def init(self, shape):
        return np.full(shape=shape, fill_value=self._fill)
65+
66+
67+
class Zeros(Constant):
    """Constant initializer fixed at 0.0."""

    def __init__(self):
        super().__init__(0.0)
71+
72+
73+
class Ones(Constant):
    """Constant initializer fixed at 1.0."""

    def __init__(self):
        super().__init__(1.0)
77+
78+
79+
class XavierUniform(Initializer):
    """
    Implement the Xavier method described in
    "Understanding the difficulty of training deep feedforward neural networks"
    Glorot, X. & Bengio, Y. (2010)

    Weights will have values sampled from uniform distribution U(-a, a) where
    a = gain * sqrt(6.0 / (num_in + num_out))
    """

    def __init__(self, gain=1.0):
        # gain rescales the bound (e.g. sqrt(2) for ReLU layers).
        self._gain = gain

    def init(self, shape):
        fan_in, fan_out = get_fans(shape)
        bound = self._gain * np.sqrt(6.0 / (fan_in + fan_out))
        return np.random.uniform(low=-bound, high=bound, size=shape)
97+
98+
99+
class XavierNormal(Initializer):
    """
    Implement the Xavier method described in
    "Understanding the difficulty of training deep feedforward neural networks"
    Glorot, X. & Bengio, Y. (2010)

    Weights will have values sampled from normal distribution N(0, std) where
    std = gain * sqrt(2.0 / (num_in + num_out))
    """

    def __init__(self, gain=1.0):
        # gain rescales the standard deviation (e.g. sqrt(2) for ReLU layers).
        self._gain = gain

    def init(self, shape):
        fan_in, fan_out = get_fans(shape)
        # std matches the (corrected) docstring: gain * sqrt(2 / (fan_in + fan_out)).
        std = self._gain * np.sqrt(2.0 / (fan_in + fan_out))
        return np.random.normal(loc=0.0, scale=std, size=shape)
116+
117+
118+
class HeUniform(Initializer):
    """
    Implement the He initialization method described in
    "Delving deep into rectifiers: Surpassing human-level performance
    on ImageNet classification" He, K. et al. (2015)

    Weights will have values sampled from uniform distribution U(-a, a) where
    a = gain * sqrt(6.0 / num_in)
    """

    def __init__(self, gain=1.0):
        self._gain = gain

    def init(self, shape):
        fan_in = get_fans(shape)[0]
        bound = self._gain * np.sqrt(6.0 / fan_in)
        return np.random.uniform(low=-bound, high=bound, size=shape)
135+
136+
137+
class HeNormal(Initializer):
    """
    Implement the He initialization method described in
    "Delving deep into rectifiers: Surpassing human-level performance
    on ImageNet classification" He, K. et al. (2015)

    Weights will have values sampled from normal distribution N(0, std) where
    std = gain * sqrt(2.0 / num_in)
    """

    def __init__(self, gain=1.0):
        self._gain = gain

    def init(self, shape):
        fan_in = get_fans(shape)[0]
        sigma = self._gain * np.sqrt(2.0 / fan_in)
        return np.random.normal(loc=0.0, scale=sigma, size=shape)

0 commit comments

Comments
 (0)