diff --git a/assignment-2/submission/16300110008/README.md b/assignment-2/submission/16300110008/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..36bd3497b1a780fc6ef92ea21a8f530133dd2d35
--- /dev/null
+++ b/assignment-2/submission/16300110008/README.md
@@ -0,0 +1,757 @@
+# Course Report
+
+This is a lab report on implementing a feedforward neural network with NumPy. The code lives in numpy_fnn.py, and the experimental results are visualized with matplotlib.
+
+## I. NumPy Implementation of the FNN Operators
+
+### 1. Matmul
+
+Matmul is matrix multiplication. Its inputs are two matrices $X$ and $W$ with $X\in R^{N\times d}$ and $W\in R^{d\times d'}$, and its output is $Z\in R^{N \times d'}$. Each element $Z_{ij}$ of $Z$ is given by:
+$$
+\begin{equation}
+Z_{ij} = \sum_{k=1}^d X_{ik} W_{kj}
+\tag{1-1}
+\end{equation}
+$$
+In matrix form this is:
+$$
+\begin{equation}
+Z = XW
+\tag{1-2}
+\end{equation}
+$$
+In practice, NumPy already provides mature matrix-multiplication routines, so we can simply call `numpy.matmul(X, W)` or `numpy.dot(X, W)`. For backpropagation, let $\delta$ denote the error term of the loss $\mathcal{L}(y,\hat{y})$ with respect to the current output; then the gradient with respect to $W_{ij}$ is:
+$$
+\begin{equation}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial W_{ij}} = \sum_{n=1}^{N}
+\frac{\partial Z_{nj}}{\partial W_{ij}}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial Z_{nj}}
+\tag{1-3}
+\end{equation}
+$$
+where, from Eq. $(1-1)$:
+$$
+\begin{aligned}
+\frac{\partial Z_{nj}}{\partial W_{ij}} & = X_{ni}
+\end{aligned} \tag{1-4}
+$$
+
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial Z_{nj}}=\delta_{nj}
+\end{aligned} \tag{1-5}
+$$
+
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial W_{ij}} &= \sum_{n=1}^{N}
+\frac{\partial Z_{nj}}{\partial W_{ij}}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial Z_{nj}}\\\\
+&=\sum_{n=1}^{N} X_{ni} \delta_{nj}
+\end{aligned}
+\tag{1-6}
+$$
+
+
+
+In matrix form this can be written as:
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial W} &=
+X^T \delta
+\end{aligned}
+\tag{1-7}
+$$
+The gradient with respect to $X_{ij}$ is:
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X_{ij}} &= \sum_{m=1}^{d'}
+\frac{\partial Z_{im}}{\partial X_{ij}}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial Z_{im}} \\\\
+&= \sum_{m=1}^{d'}
+W_{jm} \delta_{im}
+\end{aligned}
+\tag{1-8}
+$$
+In matrix form this can be written as:
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X} &=
+\delta W^T
+\end{aligned}
+\tag{1-9}
+$$
+
+The corresponding code is:
+
+```python
+ def backward(self, grad_y):
+ """
+ grad_y: shape(N, d')
+ """
+ x = self.memory['x']
+ W = self.memory['W']
+ grad_W = np.matmul(x.T, grad_y)
+ grad_x = np.matmul(grad_y, W.T)
+
+ return grad_x, grad_W
+```
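+
+To sanity-check that the backward pass above matches Eqs. $(1-7)$ and $(1-9)$, a quick finite-difference comparison can be run. The sketch below is only illustrative: the helper name `numerical_grad` and the tolerances are my own choices, not part of the submitted code.
+
+```python
+import numpy as np
+from numpy_fnn import Matmul  # the operator implemented above
+
+
+def numerical_grad(f, X, eps=1e-6):
+    """Central finite-difference gradient of the scalar-valued f() w.r.t. the array X."""
+    G = np.zeros_like(X)
+    it = np.nditer(X, flags=['multi_index'])
+    while not it.finished:
+        i = it.multi_index
+        old = X[i]
+        X[i] = old + eps
+        f_plus = f()
+        X[i] = old - eps
+        f_minus = f()
+        X[i] = old
+        G[i] = (f_plus - f_minus) / (2 * eps)
+        it.iternext()
+    return G
+
+
+np.random.seed(0)
+X, W = np.random.randn(4, 3), np.random.randn(3, 5)
+op = Matmul()
+op.forward(X, W)                                # caches x and W in op.memory
+grad_x, grad_W = op.backward(np.ones((4, 5)))   # upstream gradient of loss = sum(Z) is all ones
+
+# atol is an illustrative tolerance
+print(np.allclose(grad_W, numerical_grad(lambda: np.matmul(X, W).sum(), W), atol=1e-5))
+print(np.allclose(grad_x, numerical_grad(lambda: np.matmul(X, W).sum(), X), atol=1e-5))
+```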
+
+### 2. ReLU
+
+ReLU is a nonlinear activation function defined as $ReLU(x)=max(0,x)$. Its derivative is 1 where $x>0$ and 0 elsewhere; at $x=0$ the function is not differentiable, so we use a subgradient and set the derivative there to 0. The element-wise backward pass of ReLU can therefore be written as:
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X_{ij}} &=
+\begin{cases}
+\delta_{ij}, & \text{if $X_{ij}>0$} \\\\
+0, & \text{o.w.}
+\end{cases}
+\end{aligned}
+\tag{2-1}
+$$
+
+
+In matrix form, we can introduce a mask matrix $S$ that marks the positions of the elements of $X$ greater than 0 (1 where the element is positive, 0 otherwise). The backward pass can then be written as:
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X} &=
+S \odot\delta
+\end{aligned}
+\tag{2-2}
+$$
+
+The code is:
+
+```python
+def backward(self, grad_y):
+ """
+ grad_y: same shape as x
+ """
+ x = self.memory['x']
+    # np.where plays the role of the mask matrix S in the formula
+ grad_x = np.where(x > 0, grad_y, np.zeros_like(grad_y))
+
+ return grad_x
+```
+
+### 3. Log
+
+Suppose $y=log(x + \epsilon)$; then
+$$
+\frac{dy}{dx}=\frac{1}{x + \epsilon}
+\tag{3-1}
+$$
+
+Let $\frac{1}{X}$ denote the matrix obtained by taking the reciprocal of each element of $X$; then
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X} &=
+\left[
+\begin{matrix}
+\frac{\delta_{ij}}{X_{ij} + \epsilon}
+\end{matrix}
+\right]_{N \times d}\\\\
+&=
+\delta \odot \frac{1}{X}
+\end{aligned}
+\tag{3-2}
+$$
+
+The code is:
+
+```python
+def backward(self, grad_y):
+ """
+ grad_y: same shape as x
+ """
+ x = self.memory['x']
+ grad_x = 1 / (x + self.epsilon) * grad_y
+
+ return grad_x
+```
+
+### 4. Softmax
+
+Softmax is the smooth counterpart of argmax; its advantage is that it is differentiable. Its concrete form is:
+$$
+softmax({\pmb x})=
+\left[
+\begin{matrix}
+\frac{exp(x_1)}{\sum_{c=1}^C exp(x_c)} & \cdots & \frac{exp(x_C)}{\sum_{c=1}^C exp(x_c)}
+\end{matrix}
+\right]
+\tag{4-1}
+$$
+In practice, if some $x_i$ is very large, the computation easily overflows. We can therefore divide both the numerator and the denominator by the maximum $exp(x_{max})$, which leaves the value unchanged while preventing overflow:
+$$
+softmax({\pmb x})=
+\left[
+\begin{matrix}
+\frac{exp(x_1-x_{max})}{\sum_{c=1}^C exp(x_c-x_{max})} & \cdots & \frac{exp(x_C-x_{max} )}{\sum_{c=1}^C exp(x_c-x_{max} )}
+\end{matrix}
+\right]
+\tag{4-2}
+$$
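+
+A minimal sketch of this numerically stable forward pass is shown below; it mirrors `Softmax.forward` in `numpy_fnn.py`, though the standalone function name `stable_softmax` is my own.
+
+```python
+import numpy as np
+
+
+def stable_softmax(x):
+    """Row-wise softmax of x with shape (N, C), shifted by the row maximum for stability."""
+    shifted = x - np.max(x, axis=1, keepdims=True)  # subtracting x_max leaves the result unchanged
+    exp = np.exp(shifted)
+    return exp / np.sum(exp, axis=1, keepdims=True)
+
+
+print(stable_softmax(np.array([[1000.0, 1001.0, 1002.0]])))  # finite probabilities instead of overflow
+```
+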
+For backpropagation, $\frac {\partial softmax(x_i)} {\partial x_j}$ requires a case analysis on the indices. When $i=j$:
+$$
+\begin{aligned}
+\frac {\partial softmax(x_i)} {\partial x_j} &=
+\frac{\partial}{\partial x_j}(\frac{exp(x_j-x_{max})}{\sum_{c=1}^C exp(x_c-x_{max})}) \\\\
+&=
+\frac{\partial}{\partial x_j}(\frac{exp(x_j)}{\sum_{c=1}^C exp(x_c)})\\\\
+&=
+\frac{\frac{\partial exp(x_j)}{\partial x_j}\sum_{c=1}^C exp(x_c) - exp(x_j)\frac{\partial \sum_{c=1}^C exp(x_c)}{\partial x_j}}{(\sum_{c=1}^C exp(x_c))^2}\\\\
+&=
+\frac{exp(x_j)\sum_{c=1}^C exp(x_c) - exp(x_j)^2}{(\sum_{c=1}^C exp(x_c))^2}\\\\
+&=
+softmax(\pmb x)_j - (softmax(\pmb x)_j)^2\\\\
+&=softmax(\pmb x)_j(1-softmax(\pmb x)_j)
+\end{aligned}
+\tag{4-3}
+$$
+
+When $i\neq j$:
+$$
+\begin{aligned}
+\frac {\partial softmax(x_i)} {\partial x_j}
+&=
+\frac{\partial}{\partial x_j}(\frac{exp(x_i)}{\sum_{c=1}^C exp(x_c)})\\\\
+&=
+\frac{0- exp(x_i)\frac{\partial \sum_{c=1}^C exp(x_c)}{\partial x_j}}{(\sum_{c=1}^C exp(x_c))^2}\\\\
+&=
+\frac{- exp(x_i)exp(x_j)}{(\sum_{c=1}^C exp(x_c))^2}\\\\
+&=
+-softmax(\pmb x)_i softmax(\pmb x)_j
+\end{aligned}
+\tag{4-4}
+$$
+In matrix form:
+$$
+\begin{aligned}
+\frac {\partial softmax(\pmb x)} {\partial {\pmb x}} &=\left[
+\begin{matrix}
+softmax(\pmb x)_1(1-softmax(\pmb x)_1) & \cdots& -softmax(\pmb x)_1 softmax(\pmb x)_C \\\\
+\vdots & \ddots & \vdots\\\\
+-softmax(\pmb x)_C softmax(\pmb x)_1 & \cdots& softmax(\pmb x)_C(1-softmax(\pmb x)_C)
+\end{matrix}
+\right] \\\\
+&=diag(softmax(\pmb x)) - softmax(\pmb x)softmax(\pmb x)^T
+\end{aligned}
+\tag{4-5}
+$$
+
+where $diag(softmax(\pmb x))$ is the diagonal matrix whose diagonal entries are the elements of $softmax(\pmb x)$ in order. Assuming the error term is $\delta \in R^{C}$, we have:
+$$
+\begin{aligned}
+\frac{\partial \mathcal{L}(y,\hat{y})}{\partial \pmb x} &=
+\frac {\partial \mathcal{L}(y,\hat{y})} {\partial softmax(\pmb x)} \frac {\partial softmax(\pmb x)} {\partial {\pmb x}}\\\\
+&=\delta(diag(softmax(\pmb x)) - softmax(\pmb x)^Tsoftmax(\pmb x))
+\end{aligned}
+\tag{4-6}
+$$
+Note that $softmax(\pmb x)$ here is a row vector. In the NumPy implementation we apply softmax to every row of $X$, which gives the predicted probability of each class for each sample; denote the result by $\hat Y \in R^{N \times C}$. During backpropagation we expand $\hat Y$ to $R^{N \times 1 \times C}$ and use `numpy.matmul()` over the last two dimensions to compute the derivative of $\hat Y$ with respect to $X$, $dX \in R^{N \times C \times C}$. We then expand the error term $\delta \in R^{N \times C}$ to $R^{N \times 1 \times C}$ and multiply it with $dX$ over the last two dimensions as well, obtaining $\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X}_{temp} \in R^{N \times 1 \times C}$; squeezing out the middle dimension yields the final result $\frac{\partial \mathcal{L}(y,\hat{y})}{\partial X} \in R^{N \times C}$.
+
+The code is:
+
+```python
+def backward(self, grad_y):
+ """
+ grad_y: same shape as x
+ """
+ N, C = grad_y.shape
+ A = self.memory['A']
+    # expand the activations and compute dX, the derivative of softmax w.r.t. X
+ temp = A[:, np.newaxis, :] * np.eye(C) - np.matmul(A[:, np.newaxis, :].transpose(0, 2, 1), A[:, np.newaxis, :])
+    # expand the error term and multiply it with dX
+ grad_x = np.matmul(grad_y[:, np.newaxis, :], temp).squeeze(1)
+
+ return grad_x
+```
+
+## II. Model Training and Testing
+
+### 1. Learning Rate Tuning
+
+The author ran experiments with learning rates of 0.1, 0.01, 0.001, and 0.0001; the training-set losses are shown below:
+
+(Figure 1: lr=0.1; Figure 2: lr=0.01; Figure 3: lr=0.001; Figure 4: lr=0.0001 — training-loss curves.)
+
+As the figures show, a larger learning rate lets the model learn faster on this dataset: with a learning rate of 0.1, the loss converges after roughly 200 training steps, whereas with a learning rate of 0.001 the loss still has not converged after 1400 steps.
+
+On the test set, the accuracies are as follows:
+
+| Learning rate | epoch=1 accuracy | epoch=2 accuracy | epoch=3 accuracy |
+| ------ | ---------------- | ---------------- | ---------------- |
+| 0.1 | *0.9511* | *0.9631* | *0.9719* |
+| 0.01 | *0.9150* | *0.9150* | *0.9439* |
+| 0.001 | *0.7426* | *0.8251* | *0.8561* |
+| 0.0001 | *0.2657* | *0.3798* | *0.4734* |
+
+Judging from these results, the test set shows no overfitting, but the model underfits when the learning rate is too small. A healthy loss curve should have a steep "L" shape; a curve with bumps or a flat profile suggests that the model is not fitting the data well, i.e. underfitting.
+
+### 2. Adjusting the Number of Epochs
+
+The author fixed the learning rate at 0.1 and trained for 50 epochs; the results are shown in the figures below:
+
+
+
+
+
+
+The three figures show, from left to right, the training loss, the test accuracy, and the test loss. After about 10000 steps the training loss essentially converges to 0 and the test accuracy keeps rising, but after roughly 10 epochs the test loss starts to increase, indicating overfitting. Early stopping can mitigate this: stop training once the monitored validation loss starts to rise, which yields a model with good generalization.
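+
+A minimal early-stopping sketch is given below. It reuses the training loop from `numpy_mnist.py`, but the `make_loader` and `validation_loss` helpers and the `patience` threshold are illustrative assumptions, not part of the original code.
+
+```python
+from numpy_fnn import NumpyLoss
+from utils import one_hot
+
+
+def train_with_early_stopping(model, make_loader, validation_loss,
+                              max_epochs=50, learning_rate=0.1, patience=3):
+    """Stop once the monitored validation loss has not improved for `patience` epochs."""
+    criterion = NumpyLoss()
+    best_loss, best_weights, bad_epochs = float('inf'), None, 0
+    for epoch in range(max_epochs):
+        for x, y in make_loader():                  # a freshly shuffled list of mini-batches per epoch
+            y = one_hot(y)
+            pred = model.forward(x)
+            criterion.get_loss(pred, y)
+            model.backward(criterion.backward())
+            model.optimize(learning_rate)
+        val = validation_loss(model)                # e.g. the NLL loss on a held-out split (assumed helper)
+        if val < best_loss:
+            best_loss, bad_epochs = val, 0
+            best_weights = (model.W1.copy(), model.W2.copy(), model.W3.copy())
+        else:
+            bad_epochs += 1
+            if bad_epochs >= patience:              # validation loss has risen for `patience` epochs
+                break
+    if best_weights is not None:                    # restore the best checkpoint
+        model.W1, model.W2, model.W3 = best_weights
+    return model
+```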
+
+### 3. Adjusting the Number of Hidden-Layer Neurons
+
+In the following experiments the author varied the sizes of the two hidden layers to compare the effect of the neuron count on the model. The optimizer is SGD with a learning rate of 0.1; the results are as follows:
+
+(Figures 1–3: training loss for hidden sizes (254,64), (512,128), and (1280,320); Figures 4–6: test accuracy for the same three settings.)
+
+
+The results show that using more hidden neurons speeds up the convergence of the training loss and keeps the batch-to-batch oscillation of the loss within a smaller range. In terms of test accuracy, however, the larger models do not noticeably improve classification performance, possibly because the extra capacity leads to mild overfitting, so test performance no longer improves in step with training performance.
+
+### 4. Data Augmentation
+
+The author applied a simple data-augmentation technique: adding Gaussian noise to the images. The code is as follows:
+
+```python
+def gaussian_noise(img, mu=0.1307, std=0.3081):
+ """
+    Generate random noise whose mean and standard deviation match the image data statistics
+    :param img: input data
+    :param mu: noise mean
+    :param std: noise standard deviation
+    :return: the image with noise added
+ """
+ epsilon = 1
+ sigma = std
+ noise = np.random.normal(mu, sigma, img.shape) * epsilon
+    # clip to the valid range
+ out = np.clip(img + noise, -1, 1)
+ return out
+```
+
+Using the same model structure and hyperparameters, the author examined how the noise affects model performance; the results are as follows:
+
+(Figure 1: train_with_noise; Figure 2: train_without_noise — test accuracy on clean and noisy test data.)
+
+
+From the results above, training with noisy data slightly hurts the model's performance on clean data but improves it on noisy data; training without noisy data makes the model perform poorly when it encounters noise, with severe oscillation in accuracy. The experiment shows that data augmentation effectively improves the model's robustness and prevents it from degrading under perturbations.
+
+## III. NumPy Implementation of mini_batch
+
+The code is as follows:
+
+```python
+def mini_batch_numpy(dataset, batch_size=128):
+    # standardize the data
+ X = dataset.data.numpy() / 255 # mean=(0.1307,), std=(0.3081,)
+ mean = 0.1307
+ std = 0.3081
+ X = (X - mean) / std
+ y = dataset.targets.numpy()
+
+    # shuffle the dataset
+ n = X.shape[0]
+ idx = np.arange(n)
+ np.random.shuffle(idx)
+ X = X[idx]
+ y = y[idx]
+
+    # split the dataset into batches
+    iter_num = int(np.ceil(n / batch_size))
+    dataloader = \
+ [(X[i * batch_size: (i + 1) * batch_size], y[i * batch_size: (i + 1) * batch_size])
+ if (i + 1) * batch_size <= n
+ else (X[i * batch_size:], y[i * batch_size:])
+ for i in range(iter_num)]
+ return dataloader
+```
+
+The `mini_batch()` function should accomplish the following:
+
+1. Map the data into $(0,1)$ and standardize it, with the preset mean 0.1307 and standard deviation 0.3081
+2. Shuffle the dataset
+3. Split the data into mini-batches
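+
+For reference, a minimal usage sketch (assuming `download_mnist` and `one_hot` from `utils`, as in `numpy_mnist.py`):
+
+```python
+from utils import download_mnist, one_hot
+
+train_dataset, test_dataset = download_mnist()
+for x, y in mini_batch_numpy(train_dataset, batch_size=128):
+    y = one_hot(y)   # x: (batch, 28, 28) float array, y: (batch, 10) one-hot labels
+    # ... forward / backward / optimize ...
+```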
+
+## IV. Comparison of Optimization Methods
+
+We first introduce the common optimization methods.
+
+### 1. Momentum
+
+Momentum, also called momentum gradient descent, updates the parameters not with the raw current gradient but with a moving average of the gradients. It can be written as:
+$$
+\begin{aligned}
+V_{\nabla W_t} & \gets \beta V_{\nabla W_{t-1}} + (1-\beta)\nabla W_t\\\\
+V_{\nabla b_t} & \gets \beta V_{\nabla b_{t-1}} + (1-\beta)\nabla b_t\\\\
+W_t &\gets W_{t-1} - \alpha V_{\nabla W_t}\\\\
+b_t &\gets b_{t-1} - \alpha V_{\nabla b_t}
+\end{aligned}
+\tag{4-1-1}
+$$
+Here $V$ is the momentum and $\alpha$ is the learning rate. Momentum can be understood as follows: the update applied at each step becomes the vector sum of the current gradient step and part of the previous steps. When the current step points in roughly the same direction as the previous ones, this stabilizes the descent; when the directions oppose each other, it acts as a brake, making gradient descent smoother. In general, momentum gradient descent can be paired with a larger learning rate.
+
+
+
+In the figure above, the solid black arrow represents the previous descent step, the solid blue and red arrows represent current steps that roughly agree with or oppose it, and the dashed blue and red arrows show the steps after the momentum adjustment. As can be seen, momentum gradient descent indeed keeps the descent direction stable and reduces oscillation.
+
+The code is:
+
+```python
+# optim:Momentum
+# beta is typically 0.9
+beta = h_params[0]
+# fetch the momentum from the previous step; initialized to 0
+V1, V2, V3 = self.memory.get('V1', 0), self.memory.get('V2', 0), self.memory.get('V3', 0)
+# update the momentum
+V1 = beta * V1 + (1 - beta) * self.W1_grad
+V2 = beta * V2 + (1 - beta) * self.W2_grad
+V3 = beta * V3 + (1 - beta) * self.W3_grad
+# store the current momentum
+self.memory['V1'] = V1
+self.memory['V2'] = V2
+self.memory['V3'] = V3
+# update the parameters
+self.W1 -= learning_rate * V1
+self.W2 -= learning_rate * V2
+self.W3 -= learning_rate * V3
+```
+
+### 2. AdaGrad
+
+AdaGrad is an adaptive-learning-rate optimizer. Its motivation is that earlier methods update every variable with the same learning rate, ignoring differences in the importance and distribution of the variables. AdaGrad adapts each parameter's learning rate according to how its gradients evolve, so that parameters with larger gradients have their learning rates decay faster; intuitively, AdaGrad makes the updates slower along the "steeper" directions. Concretely, the current gradient is divided by the square root of the sum of squares of its history. Taking the parameter $W$ as an example:
+$$
+\begin{aligned}
+\pmb g &\gets \nabla W \\\\
+\pmb r &\gets \pmb r + \pmb g \odot \pmb g \\\\
+W &\gets W - \frac{\alpha}{\epsilon + \sqrt{\pmb r}} \odot \pmb g
+\end{aligned}
+\tag{4-2-1}
+$$
+where $\alpha$ is the learning rate, $\epsilon$ is a small constant usually set to $10^{-7}$, and $\pmb r$ is usually initialized to 0.
+
+The code is:
+
+```python
+# optim:AdaGrad
+# with a learning rate above 1e-2 the gradients vanish
+epsilon = 1e-7
+# read the accumulated sum of squared gradients
+r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+# update the accumulated sum of squared gradients
+r1 += np.square(self.W1_grad)
+r2 += np.square(self.W2_grad)
+r3 += np.square(self.W3_grad)
+# store the accumulated sum of squared gradients
+self.memory['r1'] = r1
+self.memory['r2'] = r2
+self.memory['r3'] = r3
+# update the parameters
+self.W1 -= learning_rate / (epsilon + np.sqrt(r1)) * self.W1_grad
+self.W2 -= learning_rate / (epsilon + np.sqrt(r2)) * self.W2_grad
+self.W3 -= learning_rate / (epsilon + np.sqrt(r3)) * self.W3_grad
+```
+
+### 3. RMSProp
+
+Although AdaGrad adapts each parameter's learning rate, it can make the learning rate decay too quickly. RMSProp was proposed to address this: building on AdaGrad, it replaces the accumulated sum of squared gradients with an exponential moving average, so the learning rate decays less sharply. The update of $\pmb r$ becomes:
+$$
+\begin{aligned}
+\pmb r &\gets \rho \pmb r + (1-\rho)\pmb g \odot \pmb g
+\end{aligned}
+\tag{4-3-1}
+$$
+The other updates are the same as in AdaGrad. Typically $\epsilon$ is $10^{-6}$ and $\rho$ can be set to 0.999.
+
+```python
+# optim:RMSProp
+# lr 1e-3, rho 0.999
+epsilon = 1e-6
+rho = h_params[0]
+# read the accumulated sum of squared gradients
+r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+# update the accumulated sum of squared gradients
+r1 = rho * r1 + (1 - rho) * np.square(self.W1_grad)
+r2 = rho * r2 + (1 - rho) * np.square(self.W2_grad)
+r3 = rho * r3 + (1 - rho) * np.square(self.W3_grad)
+# store the accumulated sum of squared gradients
+self.memory['r1'] = r1
+self.memory['r2'] = r2
+self.memory['r3'] = r3
+# update the parameters
+self.W1 -= learning_rate / (epsilon + np.sqrt(r1)) * self.W1_grad
+self.W2 -= learning_rate / (epsilon + np.sqrt(r2)) * self.W2_grad
+self.W3 -= learning_rate / (epsilon + np.sqrt(r3)) * self.W3_grad
+```
+
+### 4. Adam
+
+Adam goes one step beyond RMSProp: it combines the Momentum method with RMSProp and additionally applies a bias correction to the moment estimates, so that the model can converge quickly even in the very first training steps. It can be written as:
+$$
+\begin{aligned}
+\pmb g &\gets \nabla W \\\\
+\pmb s &\gets \rho_1 \pmb s + (1 - \rho_1) \pmb g \\\\
+\pmb r &\gets \rho_2 \pmb r + (1 - \rho_2) \pmb g \odot \pmb g \\\\
+\hat{\pmb s} &\gets \frac{\pmb s}{1 - \rho^t_1} \\\\
+\hat{\pmb r} &\gets \frac{\pmb r}{1 - \rho^t_2} \\\\
+W &\gets W - \alpha \frac{\hat{\pmb s}}{\epsilon + \sqrt{\hat{\pmb r}}}
+\end{aligned}
+\tag{4-4-1}
+$$
+where $t$ is the training step count with initial value 0, $\pmb s$ and $\pmb r$ are initialized to 0, $\rho_1$ and $\rho_2$ are typically 0.9 and 0.999, and the learning rate is usually set to 0.001.
+
+In practice, one can train with Adam first and then switch to SGD, so that the model keeps a good convergence rate in both the early and the late stages of training (a sketch of such a schedule is given after the Adam code below).
+
+```python
+# optim:Adam
+# lr=1e-3, rho1=0.9, rho2=0.999
+epsilon = 1e-8
+rho1, rho2 = h_params[0], h_params[1]
+# determine the current time step
+t = self.memory.get('t', 0)
+t += 1
+# read the accumulated squared gradients and the momentum; both initialized to 0
+s1, s2, s3 = self.memory.get('s1', 0), self.memory.get('s2', 0), self.memory.get('s3', 0)
+r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+# update the momentum
+s1 = rho1 * s1 + (1 - rho1) * self.W1_grad
+s2 = rho1 * s2 + (1 - rho1) * self.W2_grad
+s3 = rho1 * s3 + (1 - rho1) * self.W3_grad
+# store the momentum
+self.memory['s1'] = s1
+self.memory['s2'] = s2
+self.memory['s3'] = s3
+# update the accumulated squared gradients
+r1 = rho2 * r1 + (1 - rho2) * np.square(self.W1_grad)
+r2 = rho2 * r2 + (1 - rho2) * np.square(self.W2_grad)
+r3 = rho2 * r3 + (1 - rho2) * np.square(self.W3_grad)
+# store the accumulated squared gradients
+self.memory['r1'] = r1
+self.memory['r2'] = r2
+self.memory['r3'] = r3
+# bias-correct the momentum
+s1_hat = s1 / (1 - np.power(rho1, t))
+s2_hat = s2 / (1 - np.power(rho1, t))
+s3_hat = s3 / (1 - np.power(rho1, t))
+# bias-correct the squared-gradient accumulator
+r1_hat = r1 / (1 - np.power(rho2, t))
+r2_hat = r2 / (1 - np.power(rho2, t))
+r3_hat = r3 / (1 - np.power(rho2, t))
+# update the parameters
+self.W1 -= learning_rate / (epsilon + np.sqrt(r1_hat)) * s1_hat
+self.W2 -= learning_rate / (epsilon + np.sqrt(r2_hat)) * s2_hat
+self.W3 -= learning_rate / (epsilon + np.sqrt(r3_hat)) * s3_hat
+```
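+
+The Adam-then-SGD schedule mentioned above could look like the following sketch. It reuses the training loop of `numpy_mnist.py` (with `mini_batch_numpy` and `get_numpy_initialization` assumed to be in scope from that file); the switch point of two epochs is an arbitrary illustrative choice.
+
+```python
+from numpy_fnn import NumpyModel, NumpyLoss
+from utils import download_mnist, one_hot
+
+model, numpy_loss = NumpyModel(), NumpyLoss()
+model.W1, model.W2, model.W3 = get_numpy_initialization()
+train_dataset, _ = download_mnist()
+
+epoch_number = 5
+for epoch in range(epoch_number):
+    for x, y in mini_batch_numpy(train_dataset):
+        y = one_hot(y)
+        y_pred = model.forward(x)
+        loss = numpy_loss.get_loss(y_pred, y)
+        model.backward(numpy_loss.backward())
+        if epoch < 2:   # assumed switch point: Adam first ...
+            model.optimize(1e-3, mode='Adam', h_params=[0.9, 0.999])
+        else:           # ... then plain SGD for the rest of training
+            model.optimize(0.1, mode='SGD')
+```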
+
+### 5. Comparison Experiments
+
+Different optimizers have different sensitivities to the learning rate, so the author compared them using each optimizer's commonly used learning rate. The table below records the parameters used in the experiments:
+
+| Optimizer | Learning rate | Parameters |
+| -------- | ------ | ------------------------- |
+| SGD | 0.1 | None |
+| Momentum | 0.1 | $\beta=0.9$ |
+| AdaGrad | 0.01 | None |
+| RMSProp | 0.001 | $\rho=0.999$ |
+| Adam | 0.001 | $\rho_1=0.9,\rho_2=0.999$ |
+
+The figure below shows how the training loss evolves for each optimizer.
+
+
+
+The next figure shows the same curves after taking the base-100 logarithm of the loss.
+
+
+
+These two figures show that, on this dataset, the optimizers converge at broadly similar speeds. SGD fluctuates the most during convergence; AdaGrad, RMSProp, and Adam converge at similar speeds and faster than SGD, with AdaGrad starting from a larger initial loss; Adam combines the advantages of Momentum and RMSProp, keeping good stability and fast convergence. In the later stage AdaGrad clearly has the smallest loss oscillation, followed by Adam, while Momentum, RMSProp, and SGD all oscillate noticeably.
+
+
+
+In the later stage, RMSProp converges to a slightly higher loss than the other optimizers, though the difference is not significant; SGD reaches a fairly good final loss, whereas Adam's loss curve oscillates more, which suggests switching to SGD late in training to let the loss converge to a better value.
+
+## V. Weight Initialization
+
+The initialization methods for torch layers can be found in the init file. They include uniform initialization `uniform_()`, normal initialization `normal_()`, truncated-normal initialization `trunc_normal()`, constant initialization `constant_()`, Dirac delta initialization `dirac_()`, Xavier uniform initialization `xavier_uniform_()`, Xavier normal initialization `xavier_normal_()`, and the methods used by the linear layers in this assignment, `kaiming_uniform_()` and `kaiming_normal_()`. Space does not allow covering them all, so the author selected normal initialization, Xavier initialization, and Kaiming initialization for the experiments.
+
+### 1. Normal Distribution Initialization
+
+Early neural network models often drew the initial weights from a normal distribution $N(0, 0.1)$ and controlled their magnitude with a scaling factor $\beta$. For small network architectures this initialization works. But as the number of layers grows, activations of parameters initialized this way cluster more and more tightly around 0 after passing through $tanh()$, suppressing the output of each layer; during backpropagation the gradients then vanish and the network loses its ability to learn.
+
+The figure below shows, for a model initialized this way, the histogram of each layer's activations after $tanh()$, where every layer has 4096 neurons and the scaling factor is 0.01:
+
+
+
+The outputs clearly drift toward 0. Once the outputs collapse to 0, the error terms multiplied by them during backpropagation also go to 0, and the gradients eventually vanish. If we instead set the scaling factor to 0.06, the outputs of each layer gradually cluster around 1 and -1:
+
+
+
+This shows that the net input of each layer saturates $tanh()$; in the saturated region the derivative of the activation function approaches 0, which again causes vanishing gradients.
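+
+The observation above can be reproduced with a small sketch modelled on the `visualize_weight` helper in `numpy_mnist.py`; the layer sizes and the scaling factor are the values quoted in the text.
+
+```python
+import numpy as np
+
+np.random.seed(1)
+factor = 0.01                        # the scaling factor discussed above; try 0.06 to see saturation
+sizes = [28 * 28, 4096, 4096, 4096]
+
+x = np.random.rand(16, sizes[0])     # a random batch of inputs
+for i in range(1, len(sizes)):
+    W = np.random.normal(size=(sizes[i - 1], sizes[i])) * factor
+    x = np.tanh(x.dot(W))
+    # with factor=0.01 the std shrinks layer by layer; with factor=0.06 the activations pile up near 1 and -1
+    print(f"layer {i}: mean={x.mean():.4f}, std={x.std():.4f}")
+```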
+
+### 2. Xavier Initialization
+
+Xavier initialization was proposed to keep each layer's output stable and fairly evenly distributed around 0, so that the derivative of the activation function stays away from 0 and the outputs do not collapse to 0, preserving the gradients. Building on normal initialization, it drops the constant scaling factor and instead controls the distribution with $\frac{2}{\sqrt{D_{in}+D_{out}}}$; the idea is to keep the variance of the samples approximately unchanged by the mapping (see [深度前馈网络与Xavier初始化原理-夕小瑶](https://zhuanlan.zhihu.com/p/27919794)).
+
+For the output of each neuron in the current layer (see Eq. $(1-1)$), the variance of each summand can be written as:
+$$
+\begin{aligned}
+Var(w_ix_i)&=E[w_i]^2Var(x_i)+E[x_i]^2Var(w_i)+Var(w_i)Var(x_i)
+\end{aligned}
+\tag{5-2-1}
+$$
+If $w_i$ and $x_i$ are constrained to have zero mean, and the weights and inputs are assumed to be independent and each identically distributed, the variance of the neuron's output can be written as:
+$$
+\begin{aligned}
+Var(z)&=\sum^n_{i=1}Var(w_{i})Var(x_{i}) \\\\
+&=nVar(w)Var(x)
+\end{aligned}
+\tag{5-2-2}
+$$
+To keep the variance unchanged by the mapping we need $nVar(w)=1$, i.e. $Var(w)=1/n$. In the forward pass $n$ is $d_{in}$ and in the backward pass $n$ is $d_{out}$; as a compromise, $std(w)$ can be set to $\frac{2}{\sqrt{D_{in}+D_{out}}}$, which keeps the input/output variance stable in both the forward and backward passes. The experimental results confirm this conclusion.
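+
+For reference, a minimal sketch mirroring the `Xavier` branch of `get_numpy_initialization` in `numpy_mnist.py`, which scales a standard normal by $2/(\sqrt{d_{in}}+\sqrt{d_{out}})$:
+
+```python
+import numpy as np
+
+
+def xavier_normal(d_in, d_out):
+    # std(w) = 2 / (sqrt(d_in) + sqrt(d_out)), following the derivation above
+    return np.random.normal(size=(d_in, d_out)) * (2 / (np.sqrt(d_in) + np.sqrt(d_out)))
+
+
+W1, W2, W3 = xavier_normal(28 * 28, 256), xavier_normal(256, 64), xavier_normal(64, 10)
+```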
+
+
+
+### 3. Kaiming
+
+Xavier initialization is derived for linear functions, but it also works reasonably well for saturating activations such as the tanh used above, because tanh is approximately linear near 0. For non-saturating activations such as ReLU, however, it is less effective, because in a ReLU network the output mean is not 0.
+
+
+
+After ReLU activation, the inputs to each layer still gradually cluster around 0. Against this background, Kaiming He proposed Kaiming initialization, which assumes that each neuron is activated with probability 0.5 and correspondingly corrects Eq. $(5-2-2)$:
+$$
+\begin{aligned}
+Var(z)&=\sum^n_{i=1}Var(w_{i})Var(x_{i}) \\\\
+&=\frac{n}{2}Var(w)Var(x)\\\\
+Var(w)&=\frac{2}{n}
+\end{aligned}
+\tag{5-3-1}
+$$
+In practice it is enough to match the variance in only one of the two directions (forward or backward) to get good results. The experiments show that the mean and variance stay essentially stable.
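+
+A minimal sketch of this rule, using the normal-distribution variant of Kaiming/He initialization with $Var(w)=2/n$ and $n=d_{in}$; note that the submitted code uses the uniform variant shown further below, so this helper is illustrative only:
+
+```python
+import numpy as np
+
+
+def kaiming_normal(d_in, d_out):
+    # Var(w) = 2 / n with n = d_in, i.e. std = sqrt(2 / d_in)
+    return np.random.normal(size=(d_in, d_out)) * np.sqrt(2 / d_in)
+
+
+W1 = kaiming_normal(28 * 28, 256)
+```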
+
+
+
+In general, a correction is applied for LeakyReLU. The `torch.nn.Linear` class uses the `kaiming_uniform_()` method; assuming the parameter $W$ has shape $(d_{in}, d_{out})$, the distribution of $W_{ij}$ can be written as:
+$$
+\begin{aligned}
+bound &= \sqrt{\frac{6}{(1+a^2)d_{in}}} \\\\
+W_{ij} &\sim U(-bound, bound)
+\end{aligned}
+\tag{5-3-2}
+$$
+where $a$ is the slope of the ReLU variant on the negative half-axis; torch's Linear layer uses $a=\sqrt5$ by default, which gives $bound = \sqrt{\frac{1}{d_{in}}}$. Since torch and NumPy use different random seeds, a direct numerical comparison is not possible. The code is as follows:
+
+```python
+def get_numpy_initialization():
+ bound1 = np.sqrt(1 / (28 * 28))
+ bound2 = np.sqrt(1 / 256)
+ bound3 = np.sqrt(1 / 64)
+
+ W1 = (np.random.rand(28 * 28, 256) - .5) * 2 * bound1
+ W2 = (np.random.rand(256, 64) - .5) * 2 * bound2
+ W3 = (np.random.rand(64, 10) - .5) * 2 * bound3
+
+ return W1, W2, W3
+```
+
+The loss curve is essentially similar to that obtained with the torch initialization.
+
+
+
+In fact, the NumPy model does not use LeakyReLU, so following torch's rule with $a=0$ we take $bound = \sqrt{\frac{6}{d_{in}}}$ and modify the corresponding part of the code:
+
+```python
+def get_numpy_initialization():
+ bound1 = np.sqrt(6 / (28 * 28))
+ bound2 = np.sqrt(6 / 256)
+ bound3 = np.sqrt(6 / 64)
+
+ W1 = (np.random.rand(28 * 28, 256) - .5) * 2 * bound1
+ W2 = (np.random.rand(256, 64) - .5) * 2 * bound2
+ W3 = (np.random.rand(64, 10) - .5) * 2 * bound3
+
+ return W1, W2, W3
+```
+
+The test accuracies of the first three epochs are 0.9532, 0.9659, and 0.9734, close to the results of torch's default initialization.
+
+We also visualize the activations of each layer after the ReLU activation:
+
+With the Kaiming init method:
+
+
+
+With normal-distribution initialization in the range (-1,1):
+
+
+
+It can be seen that Kaiming initialization keeps the mean and variance of each layer's output stable, which reduces the chance of exploding or vanishing gradients during training.
+
+## VI. How to Run the Code
+
+```bash
+# numpy_mini_batch and numpy_initialization are used by default
+python numpy_mnist.py v  # compare weight initializations
+python numpy_mnist.py o  # compare optimizers
+python numpy_mnist.py    # default training and testing (SGD, epoch=3, lr=0.1)
+```
\ No newline at end of file
diff --git a/assignment-2/submission/16300110008/img/epoch=3,lr=0.0001.png b/assignment-2/submission/16300110008/img/epoch=3,lr=0.0001.png
new file mode 100644
index 0000000000000000000000000000000000000000..606e56a9dc8c0a18c5ab489005b6546159b441e1
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=3,lr=0.0001.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=3,lr=0.001.png b/assignment-2/submission/16300110008/img/epoch=3,lr=0.001.png
new file mode 100644
index 0000000000000000000000000000000000000000..960dc86d975af6b88012bef867ff92a544d33446
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=3,lr=0.001.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=3,lr=0.01.png b/assignment-2/submission/16300110008/img/epoch=3,lr=0.01.png
new file mode 100644
index 0000000000000000000000000000000000000000..5230a0ca1902226472efa4b251b3e60606427d57
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=3,lr=0.01.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=3,lr=0.1.png b/assignment-2/submission/16300110008/img/epoch=3,lr=0.1.png
new file mode 100644
index 0000000000000000000000000000000000000000..70edcf5abe2a755fe20b9e4f635f42f21e2d58e7
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=3,lr=0.1.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=50,lr=0.1,acc.png b/assignment-2/submission/16300110008/img/epoch=50,lr=0.1,acc.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6f52dae2e3119f055ad7c15eefd32565dbc9943
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=50,lr=0.1,acc.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=50,lr=0.1,loss.png b/assignment-2/submission/16300110008/img/epoch=50,lr=0.1,loss.png
new file mode 100644
index 0000000000000000000000000000000000000000..641e984914fc16fba18e1632d12b989546a56fa2
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=50,lr=0.1,loss.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=50,lr=0.1.png b/assignment-2/submission/16300110008/img/epoch=50,lr=0.1.png
new file mode 100644
index 0000000000000000000000000000000000000000..46e307edb61e880b41535470c9681e6b07faede8
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=50,lr=0.1.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=50,lr=1,acc.png b/assignment-2/submission/16300110008/img/epoch=50,lr=1,acc.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6f52dae2e3119f055ad7c15eefd32565dbc9943
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=50,lr=1,acc.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=50,lr=1,loss.png b/assignment-2/submission/16300110008/img/epoch=50,lr=1,loss.png
new file mode 100644
index 0000000000000000000000000000000000000000..641e984914fc16fba18e1632d12b989546a56fa2
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=50,lr=1,loss.png differ
diff --git a/assignment-2/submission/16300110008/img/epoch=50,lr=1.png b/assignment-2/submission/16300110008/img/epoch=50,lr=1.png
new file mode 100644
index 0000000000000000000000000000000000000000..46e307edb61e880b41535470c9681e6b07faede8
Binary files /dev/null and b/assignment-2/submission/16300110008/img/epoch=50,lr=1.png differ
diff --git a/assignment-2/submission/16300110008/img/init2.png b/assignment-2/submission/16300110008/img/init2.png
new file mode 100644
index 0000000000000000000000000000000000000000..813abf5d88a7b54e48d1a8ea7c00a84975a4c9e0
Binary files /dev/null and b/assignment-2/submission/16300110008/img/init2.png differ
diff --git a/assignment-2/submission/16300110008/img/init3.png b/assignment-2/submission/16300110008/img/init3.png
new file mode 100644
index 0000000000000000000000000000000000000000..5a1b2b8989a76bb6fabe8a96661744975d38b216
Binary files /dev/null and b/assignment-2/submission/16300110008/img/init3.png differ
diff --git a/assignment-2/submission/16300110008/img/kaiming_init1.png b/assignment-2/submission/16300110008/img/kaiming_init1.png
new file mode 100644
index 0000000000000000000000000000000000000000..edbe2c89d268711d0709752311cdbb2e7a1f8a46
Binary files /dev/null and b/assignment-2/submission/16300110008/img/kaiming_init1.png differ
diff --git a/assignment-2/submission/16300110008/img/m_(1280,320).png b/assignment-2/submission/16300110008/img/m_(1280,320).png
new file mode 100644
index 0000000000000000000000000000000000000000..87e7298570342f056190c2b991979de0b8f8b51d
Binary files /dev/null and b/assignment-2/submission/16300110008/img/m_(1280,320).png differ
diff --git a/assignment-2/submission/16300110008/img/m_(254,64).png b/assignment-2/submission/16300110008/img/m_(254,64).png
new file mode 100644
index 0000000000000000000000000000000000000000..8bad30d32f2c4ed94abe370e81e08b54f14b6e86
Binary files /dev/null and b/assignment-2/submission/16300110008/img/m_(254,64).png differ
diff --git a/assignment-2/submission/16300110008/img/m_(512,128).png b/assignment-2/submission/16300110008/img/m_(512,128).png
new file mode 100644
index 0000000000000000000000000000000000000000..366653f463fd3a69e320015bc234e41e71a16b52
Binary files /dev/null and b/assignment-2/submission/16300110008/img/m_(512,128).png differ
diff --git a/assignment-2/submission/16300110008/img/m_acc_(1280,320).png b/assignment-2/submission/16300110008/img/m_acc_(1280,320).png
new file mode 100644
index 0000000000000000000000000000000000000000..6da587fc28d39bc26dfc98d7886b9e80667ffe37
Binary files /dev/null and b/assignment-2/submission/16300110008/img/m_acc_(1280,320).png differ
diff --git a/assignment-2/submission/16300110008/img/m_acc_(256,64).png b/assignment-2/submission/16300110008/img/m_acc_(256,64).png
new file mode 100644
index 0000000000000000000000000000000000000000..7a8e5052e7e02b92b36427df4a1a23a98303a525
Binary files /dev/null and b/assignment-2/submission/16300110008/img/m_acc_(256,64).png differ
diff --git a/assignment-2/submission/16300110008/img/m_acc_(512,128).png b/assignment-2/submission/16300110008/img/m_acc_(512,128).png
new file mode 100644
index 0000000000000000000000000000000000000000..a9a1e14089ce4a7338a26b277f9a606a28ac5308
Binary files /dev/null and b/assignment-2/submission/16300110008/img/m_acc_(512,128).png differ
diff --git a/assignment-2/submission/16300110008/img/momentum.png b/assignment-2/submission/16300110008/img/momentum.png
new file mode 100644
index 0000000000000000000000000000000000000000..49cbe84c1e395b15e157df828373926a753aae7b
Binary files /dev/null and b/assignment-2/submission/16300110008/img/momentum.png differ
diff --git a/assignment-2/submission/16300110008/img/normal_init.png b/assignment-2/submission/16300110008/img/normal_init.png
new file mode 100644
index 0000000000000000000000000000000000000000..b968820ee6b6323c28d6ec148c29d28c4434849f
Binary files /dev/null and b/assignment-2/submission/16300110008/img/normal_init.png differ
diff --git a/assignment-2/submission/16300110008/img/normal_init2.png b/assignment-2/submission/16300110008/img/normal_init2.png
new file mode 100644
index 0000000000000000000000000000000000000000..937983cb9d5768d88cd3104a454c8c53493225dd
Binary files /dev/null and b/assignment-2/submission/16300110008/img/normal_init2.png differ
diff --git a/assignment-2/submission/16300110008/img/numpy_init.png b/assignment-2/submission/16300110008/img/numpy_init.png
new file mode 100644
index 0000000000000000000000000000000000000000..df605a5d8d5c4890ddde9b11a048ce3f8cd5d356
Binary files /dev/null and b/assignment-2/submission/16300110008/img/numpy_init.png differ
diff --git a/assignment-2/submission/16300110008/img/optim1.png b/assignment-2/submission/16300110008/img/optim1.png
new file mode 100644
index 0000000000000000000000000000000000000000..034e9b925200e453615bb6abd75db8b21088a2d5
Binary files /dev/null and b/assignment-2/submission/16300110008/img/optim1.png differ
diff --git a/assignment-2/submission/16300110008/img/optim2-1.png b/assignment-2/submission/16300110008/img/optim2-1.png
new file mode 100644
index 0000000000000000000000000000000000000000..b00a3bd94d29eb4486fe633c7fb1483ed0e64965
Binary files /dev/null and b/assignment-2/submission/16300110008/img/optim2-1.png differ
diff --git a/assignment-2/submission/16300110008/img/optim2.png b/assignment-2/submission/16300110008/img/optim2.png
new file mode 100644
index 0000000000000000000000000000000000000000..2bc60abbe4116142e57408fde28185bf3d35866d
Binary files /dev/null and b/assignment-2/submission/16300110008/img/optim2.png differ
diff --git a/assignment-2/submission/16300110008/img/train_with_noise.png b/assignment-2/submission/16300110008/img/train_with_noise.png
new file mode 100644
index 0000000000000000000000000000000000000000..b82edb9bea80ce1cb9c4d9530a48528665d69a54
Binary files /dev/null and b/assignment-2/submission/16300110008/img/train_with_noise.png differ
diff --git a/assignment-2/submission/16300110008/img/train_without_noise.png b/assignment-2/submission/16300110008/img/train_without_noise.png
new file mode 100644
index 0000000000000000000000000000000000000000..f2213af4b5a0091350a4895a746c99b86d6d085e
Binary files /dev/null and b/assignment-2/submission/16300110008/img/train_without_noise.png differ
diff --git a/assignment-2/submission/16300110008/img/xavier_init.png b/assignment-2/submission/16300110008/img/xavier_init.png
new file mode 100644
index 0000000000000000000000000000000000000000..68669b829accf73edf59ff41cb4902414cd32b1a
Binary files /dev/null and b/assignment-2/submission/16300110008/img/xavier_init.png differ
diff --git a/assignment-2/submission/16300110008/img/xavier_init2.png b/assignment-2/submission/16300110008/img/xavier_init2.png
new file mode 100644
index 0000000000000000000000000000000000000000..c06e2c1a813b2007089b839771362f50ab432927
Binary files /dev/null and b/assignment-2/submission/16300110008/img/xavier_init2.png differ
diff --git a/assignment-2/submission/16300110008/numpy_fnn.py b/assignment-2/submission/16300110008/numpy_fnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9bff17642d6d888ad6326c8bd7939c2e2c66502
--- /dev/null
+++ b/assignment-2/submission/16300110008/numpy_fnn.py
@@ -0,0 +1,279 @@
+import numpy as np
+
+
+class NumpyOp:
+
+ def __init__(self):
+ self.memory = {}
+ self.epsilon = 1e-12
+
+
+class Matmul(NumpyOp):
+
+ def forward(self, x, W):
+ """
+ x: shape(N, d)
+ w: shape(d, d')
+ """
+ self.memory['x'] = x
+ self.memory['W'] = W
+ h = np.matmul(x, W)
+ return h
+
+ def __call__(self, x, W):
+ return self.forward(x, W)
+
+ def backward(self, grad_y):
+ """
+ grad_y: shape(N, d')
+ """
+ x = self.memory['x']
+ W = self.memory['W']
+ grad_W = np.matmul(x.T, grad_y)
+ grad_x = np.matmul(grad_y, W.T)
+
+ return grad_x, grad_W
+
+
+class Relu(NumpyOp):
+
+ def forward(self, x):
+ self.memory['x'] = x
+ return np.where(x > 0, x, np.zeros_like(x))
+
+ def __call__(self, x):
+ return self.forward(x)
+
+ def backward(self, grad_y):
+ """
+ grad_y: same shape as x
+ """
+ x = self.memory['x']
+ grad_x = np.where(x > 0, grad_y, np.zeros_like(grad_y))
+
+ return grad_x
+
+
+class Log(NumpyOp):
+
+ def forward(self, x):
+ """
+ x: shape(N, c)
+ """
+
+ out = np.log(x + self.epsilon)
+ self.memory['x'] = x
+
+ return out
+
+ def __call__(self, x):
+ return self.forward(x)
+
+ def backward(self, grad_y):
+ """
+ grad_y: same shape as x
+ """
+ x = self.memory['x']
+ grad_x = 1 / (x + self.epsilon) * grad_y
+
+ return grad_x
+
+
+class Softmax(NumpyOp):
+ """
+ softmax over last dimension
+ """
+
+ def forward(self, x):
+ """
+ x: shape(N, c)
+ """
+ max = np.max(x, axis=1, keepdims=True)
+ out = np.exp(x - max) / np.sum(np.exp(x - max), axis=1).reshape(-1, 1)
+ self.memory['A'] = out
+
+ return out
+
+ def __call__(self, x):
+ return self.forward(x)
+
+ def backward(self, grad_y):
+ """
+ grad_y: same shape as x
+ """
+ N, C = grad_y.shape
+ A = self.memory['A']
+        # expand the activations and compute the softmax derivative
+ temp = A[:, np.newaxis, :] * np.eye(C) - np.matmul(A[:, np.newaxis, :].transpose(0, 2, 1), A[:, np.newaxis, :])
+        # expand the error term and multiply it with dX
+ grad_x = np.matmul(grad_y[:, np.newaxis, :], temp).squeeze(1)
+
+ return grad_x
+
+
+class NumpyLoss:
+
+ def __init__(self):
+ self.target = None
+
+ def get_loss(self, pred, target):
+ self.target = target
+ return (-pred * target).sum(axis=1).mean()
+
+ def backward(self):
+ return -self.target / self.target.shape[0]
+
+
+class NumpyModel:
+ def __init__(self):
+ self.W1 = np.random.normal(size=(28 * 28, 256))
+ self.W2 = np.random.normal(size=(256, 64))
+ self.W3 = np.random.normal(size=(64, 10))
+
+        # the following operators are used in forward and backward
+ self.matmul_1 = Matmul()
+ self.relu_1 = Relu()
+ self.matmul_2 = Matmul()
+ self.relu_2 = Relu()
+ self.matmul_3 = Matmul()
+ self.softmax = Softmax()
+ self.log = Log()
+
+        # the following variables are updated in backward; softmax_grad, log_grad, etc. are the operators' backpropagated gradients (partial derivatives of the loss w.r.t. each operator's input)
+ self.x1_grad, self.W1_grad = None, None
+ self.relu_1_grad = None
+ self.x2_grad, self.W2_grad = None, None
+ self.relu_2_grad = None
+ self.x3_grad, self.W3_grad = None, None
+ self.softmax_grad = None
+ self.log_grad = None
+
+ self.memory = {}
+
+ def forward(self, x):
+ x = x.reshape(-1, 28 * 28)
+
+ x = self.relu_1(self.matmul_1(x, self.W1))
+ x = self.relu_2(self.matmul_2(x, self.W2))
+ x = self.matmul_3(x, self.W3)
+ x = self.softmax(x)
+ x = self.log(x)
+
+ return x
+
+ def backward(self, y):
+ grad_y = y
+
+ self.log_grad = self.log.backward(grad_y)
+ self.softmax_grad = self.softmax.backward(self.log_grad)
+ self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad)
+ self.relu_2_grad = self.relu_2.backward(self.x3_grad)
+ self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad)
+ self.relu_1_grad = self.relu_1.backward(self.x2_grad)
+ self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad)
+
+ def optimize(self, learning_rate, mode='SGD', h_params=None):
+ """
+        Optimizer used to update the parameters
+        :param learning_rate: learning rate
+        :param mode: optimizer type: SGD, momentum, AdaGrad, RMSProp, or Adam
+        :param h_params: hyperparameters required by the optimizer
+ :return: None
+ """
+ if mode == 'SGD':
+ self.W1 -= learning_rate * self.W1_grad
+ self.W2 -= learning_rate * self.W2_grad
+ self.W3 -= learning_rate * self.W3_grad
+ elif mode == 'momentum':
+            # beta is typically 0.9
+ beta = h_params[0]
+            # fetch the momentum from the previous step; initialized to 0
+ V1, V2, V3 = self.memory.get('V1', 0), self.memory.get('V2', 0), self.memory.get('V3', 0)
+            # update the momentum
+ V1 = beta * V1 + (1 - beta) * self.W1_grad
+ V2 = beta * V2 + (1 - beta) * self.W2_grad
+ V3 = beta * V3 + (1 - beta) * self.W3_grad
+            # store the current momentum
+ self.memory['V1'] = V1
+ self.memory['V2'] = V2
+ self.memory['V3'] = V3
+            # update the parameters
+ self.W1 -= learning_rate * V1
+ self.W2 -= learning_rate * V2
+ self.W3 -= learning_rate * V3
+
+ elif mode == 'AdaGrad':
+            # with a learning rate above 1e-2 the gradients vanish
+ epsilon = 1e-7
+            # read the accumulated sum of squared gradients
+ r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+            # update the accumulated sum of squared gradients
+ r1 += np.square(self.W1_grad)
+ r2 += np.square(self.W2_grad)
+ r3 += np.square(self.W3_grad)
+            # store the accumulated sum of squared gradients
+ self.memory['r1'] = r1
+ self.memory['r2'] = r2
+ self.memory['r3'] = r3
+            # update the parameters
+ self.W1 -= learning_rate / (epsilon + np.sqrt(r1)) * self.W1_grad
+ self.W2 -= learning_rate / (epsilon + np.sqrt(r2)) * self.W2_grad
+ self.W3 -= learning_rate / (epsilon + np.sqrt(r3)) * self.W3_grad
+
+ elif mode == 'RMSProp':
+ # lr 1e-3, rho 0.999
+ epsilon = 1e-6
+ rho = h_params[0]
+            # read the accumulated sum of squared gradients
+ r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+            # update the exponential moving average of squared gradients
+ r1 = rho * r1 + (1 - rho) * np.square(self.W1_grad)
+ r2 = rho * r2 + (1 - rho) * np.square(self.W2_grad)
+ r3 = rho * r3 + (1 - rho) * np.square(self.W3_grad)
+            # store the accumulated sum of squared gradients
+ self.memory['r1'] = r1
+ self.memory['r2'] = r2
+ self.memory['r3'] = r3
+            # update the parameters
+ self.W1 -= learning_rate / (epsilon + np.sqrt(r1)) * self.W1_grad
+ self.W2 -= learning_rate / (epsilon + np.sqrt(r2)) * self.W2_grad
+ self.W3 -= learning_rate / (epsilon + np.sqrt(r3)) * self.W3_grad
+
+ elif mode == 'Adam':
+ # lr=1e-3, rho1=0.9, rho2=0.999
+ epsilon = 1e-8
+ rho1, rho2 = h_params[0], h_params[1]
+            # determine the current time step
+ t = self.memory.get('t', 0)
+ t += 1
+            # read the accumulated squared gradients and the momentum; both initialized to 0
+ s1, s2, s3 = self.memory.get('s1', 0), self.memory.get('s2', 0), self.memory.get('s3', 0)
+ r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+            # update the momentum
+ s1 = rho1 * s1 + (1 - rho1) * self.W1_grad
+ s2 = rho1 * s2 + (1 - rho1) * self.W2_grad
+ s3 = rho1 * s3 + (1 - rho1) * self.W3_grad
+            # store the momentum
+ self.memory['s1'] = s1
+ self.memory['s2'] = s2
+ self.memory['s3'] = s3
+            # update the accumulated squared gradients
+ r1 = rho2 * r1 + (1 - rho2) * np.square(self.W1_grad)
+ r2 = rho2 * r2 + (1 - rho2) * np.square(self.W2_grad)
+ r3 = rho2 * r3 + (1 - rho2) * np.square(self.W3_grad)
+            # store the accumulated squared gradients
+ self.memory['r1'] = r1
+ self.memory['r2'] = r2
+ self.memory['r3'] = r3
+            # bias-correct the momentum
+ s1_hat = s1 / (1 - np.power(rho1, t))
+ s2_hat = s2 / (1 - np.power(rho1, t))
+ s3_hat = s3 / (1 - np.power(rho1, t))
+            # bias-correct the squared-gradient accumulator
+ r1_hat = r1 / (1 - np.power(rho2, t))
+ r2_hat = r2 / (1 - np.power(rho2, t))
+ r3_hat = r3 / (1 - np.power(rho2, t))
+            # update the parameters
+ self.W1 -= learning_rate / (epsilon + np.sqrt(r1_hat)) * s1_hat
+ self.W2 -= learning_rate / (epsilon + np.sqrt(r2_hat)) * s2_hat
+ self.W3 -= learning_rate / (epsilon + np.sqrt(r3_hat)) * s3_hat
diff --git a/assignment-2/submission/16300110008/numpy_mnist.py b/assignment-2/submission/16300110008/numpy_mnist.py
new file mode 100644
index 0000000000000000000000000000000000000000..f52192aedd664743252cd1d96ecdb8a8b37dc8a3
--- /dev/null
+++ b/assignment-2/submission/16300110008/numpy_mnist.py
@@ -0,0 +1,460 @@
+import numpy as np
+from numpy_fnn import NumpyModel, NumpyLoss, Matmul, Relu, Log, Softmax
+from utils import download_mnist, batch, mini_batch, get_torch_initialization, plot_curve, one_hot
+from matplotlib import pyplot as plt
+
+np.random.seed(1)
+
+
+def gaussian_noise(img, mu=0.1307, std=0.3081):
+ """
+    Generate random noise whose mean and standard deviation match the image data statistics
+    :param img: input data
+    :param mu: noise mean
+    :param std: noise standard deviation
+    :return: the image with noise added
+ """
+ epsilon = 1
+ sigma = std
+ noise = np.random.normal(mu, sigma, img.shape) * epsilon
+    # clip to the valid range
+ out = np.clip(img + noise, -1, 1)
+ return out
+
+
+def get_numpy_initialization(mode='kaiming', test=False, neuron_shape=(256, 64)):
+ """
+    Mimic the torch initialization with numpy
+    :param neuron_shape: controls the number of hidden-layer neurons
+    :param mode: initialization mode: normal, Xavier, or kaiming
+    :param test: set to True for the initialization experiments (every layer then has 4096 neurons); False to match the fnn model structure
+    :return: parameters W1, W2, W3
+ """
+    # set the numbers of neurons
+ if test:
+ d0 = 28 * 28
+ d1 = 4096
+ d2 = 4096
+ d3 = 4096
+ else:
+ d0 = 28 * 28
+ d1, d2 = neuron_shape
+ d3 = 10
+    # choose the initialization mode
+ if mode == 'normal':
+        factor = 0.01  # scaling factor controlling the range of the initial parameters
+ W1 = np.random.normal(size=(d0, d1)) * factor
+ W2 = np.random.normal(size=(d1, d2)) * factor
+ W3 = np.random.normal(size=(d2, d3)) * factor
+
+ return W1, W2, W3
+
+ if mode == 'Xavier':
+ W1 = np.random.normal(size=(d0, d1)) * (2 / (np.sqrt(d0) + np.sqrt(d1)))
+ W2 = np.random.normal(size=(d1, d2)) * (2 / (np.sqrt(d1) + np.sqrt(d2)))
+ W3 = np.random.normal(size=(d2, d3)) * (2 / (np.sqrt(d2) + np.sqrt(d3)))
+ return W1, W2, W3
+
+ elif mode == 'kaiming':
+ bound1 = np.sqrt(6 / d0)
+ bound2 = np.sqrt(6 / d1)
+ bound3 = np.sqrt(6 / d2)
+
+ W1 = (np.random.rand(d0, d1) - .5) * 2 * bound1
+ W2 = (np.random.rand(d1, d2) - .5) * 2 * bound2
+ W3 = (np.random.rand(d2, d3) - .5) * 2 * bound3
+
+ return W1, W2, W3
+
+
+def visualize_weight(mode='kaiming', act='ReLU', test=True):
+ """
+    Visualize the distribution of each layer's activations under a given initialization
+    :param mode: initialization mode to use
+    :param act: activation type: tanh or ReLU
+    :param test: set to True for the initialization experiments (every layer then has 4096 neurons); False to match the fnn model structure
+ :return: None
+ """
+ print(mode, act)
+    x = np.random.rand(16, 28 * 28)  # random input batch
+    W1, W2, W3 = get_numpy_initialization(mode, test)  # get the initialized parameters
+ W = [0, W1, W2, W3]
+
+    # the loop below simulates a network with three hidden layers
+ for i in range(1, 4):
+ if act == 'tanh':
+ x = np.tanh(x.dot(W[i]))
+ elif act == 'ReLU':
+            x = np.maximum(0, x.dot(W[i]))  # zero out negative values, i.e. ReLU
+ else:
+ raise ValueError("WRONG ACTIVATION")
+        # statistics of this layer's activated output
+ mean = np.mean(x)
+ std = np.std(x)
+        # plot the distribution histogram
+ plt.subplot(1, 3, i)
+ plt.hist(x.flatten())
+ if act == 'ReLU':
+ lim = 5
+ else:
+ lim = 1
+
+ plt.xlim(-lim, lim)
+ plt.title(f'layer{i}\nmean={mean:.2f}\nstd={std:.2f}')
+
+ plt.show()
+
+
+def mini_batch_numpy(dataset, batch_size=128, noise=False):
+ """
+    NumPy implementation of mini-batching
+    :param dataset: the MNIST dataset obtained via torch
+    :param batch_size: batch size
+    :param noise: whether to add Gaussian noise to the images
+    :return: a list whose elements are batches (x, y)
+ """
+    # standardize the data: mean=(0.1307,), std=(0.3081,)
+ X = dataset.data.numpy() / 255
+ mean = 0.1307
+ std = 0.3081
+ X = (X - mean) / std
+ y = dataset.targets.numpy()
+
+    # add Gaussian noise
+ if noise:
+ X = gaussian_noise(X)
+
+    # shuffle the samples and labels
+ n = X.shape[0]
+ idx = np.arange(n)
+ np.random.shuffle(idx)
+ X = X[idx]
+ y = y[idx]
+
+    # split the data into batches
+ iter_num = int(np.ceil(n / batch_size))
+ dataloader = \
+ [(X[i * batch_size: (i + 1) * batch_size], y[i * batch_size: (i + 1) * batch_size])
+         # handle the last batch when it has fewer than batch_size samples
+ if (i + 1) * batch_size <= n
+ else (X[i * batch_size:], y[i * batch_size:])
+ for i in range(iter_num)]
+ return dataloader
+
+
+class NumpyModel_neuron:
+ def __init__(self, neuron_shape=(256, 64)):
+ h2, h3 = neuron_shape
+ self.W1 = np.random.normal(size=(28 * 28, h2))
+ self.W2 = np.random.normal(size=(h2, h3))
+ self.W3 = np.random.normal(size=(h3, 10))
+
+        # the following operators are used in forward and backward
+ self.matmul_1 = Matmul()
+ self.relu_1 = Relu()
+ self.matmul_2 = Matmul()
+ self.relu_2 = Relu()
+ self.matmul_3 = Matmul()
+ self.softmax = Softmax()
+ self.log = Log()
+
+        # the following variables are updated in backward; softmax_grad, log_grad, etc. are the operators' backpropagated gradients (partial derivatives of the loss w.r.t. each operator's input)
+ self.x1_grad, self.W1_grad = None, None
+ self.relu_1_grad = None
+ self.x2_grad, self.W2_grad = None, None
+ self.relu_2_grad = None
+ self.x3_grad, self.W3_grad = None, None
+ self.softmax_grad = None
+ self.log_grad = None
+
+ self.memory = {}
+
+ def forward(self, x):
+ x = x.reshape(-1, 28 * 28)
+
+ x = self.relu_1(self.matmul_1(x, self.W1))
+ x = self.relu_2(self.matmul_2(x, self.W2))
+ x = self.matmul_3(x, self.W3)
+ x = self.softmax(x)
+ x = self.log(x)
+
+ return x
+
+ def backward(self, y):
+ grad_y = y
+
+ self.log_grad = self.log.backward(grad_y)
+ self.softmax_grad = self.softmax.backward(self.log_grad)
+ self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad)
+ self.relu_2_grad = self.relu_2.backward(self.x3_grad)
+ self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad)
+ self.relu_1_grad = self.relu_1.backward(self.x2_grad)
+ self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad)
+
+ def optimize(self, learning_rate, mode='SGD', h_params=None):
+ """
+        Optimizer used to update the parameters
+        :param learning_rate: learning rate
+        :param mode: optimizer type: SGD, momentum, AdaGrad, RMSProp, or Adam
+        :param h_params: hyperparameters required by the optimizer
+ :return: None
+ """
+ if mode == 'SGD':
+ self.W1 -= learning_rate * self.W1_grad
+ self.W2 -= learning_rate * self.W2_grad
+ self.W3 -= learning_rate * self.W3_grad
+ elif mode == 'momentum':
+            # beta is typically 0.9
+ beta = h_params[0]
+            # fetch the momentum from the previous step; initialized to 0
+ V1, V2, V3 = self.memory.get('V1', 0), self.memory.get('V2', 0), self.memory.get('V3', 0)
+            # update the momentum
+ V1 = beta * V1 + (1 - beta) * self.W1_grad
+ V2 = beta * V2 + (1 - beta) * self.W2_grad
+ V3 = beta * V3 + (1 - beta) * self.W3_grad
+            # store the current momentum
+ self.memory['V1'] = V1
+ self.memory['V2'] = V2
+ self.memory['V3'] = V3
+            # update the parameters
+ self.W1 -= learning_rate * V1
+ self.W2 -= learning_rate * V2
+ self.W3 -= learning_rate * V3
+
+ elif mode == 'AdaGrad':
+            # with a learning rate above 1e-2 the gradients vanish
+ epsilon = 1e-7
+            # read the accumulated sum of squared gradients
+ r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+            # update the accumulated sum of squared gradients
+ r1 += np.square(self.W1_grad)
+ r2 += np.square(self.W2_grad)
+ r3 += np.square(self.W3_grad)
+            # store the accumulated sum of squared gradients
+ self.memory['r1'] = r1
+ self.memory['r2'] = r2
+ self.memory['r3'] = r3
+            # update the parameters
+ self.W1 -= learning_rate / (epsilon + np.sqrt(r1)) * self.W1_grad
+ self.W2 -= learning_rate / (epsilon + np.sqrt(r2)) * self.W2_grad
+ self.W3 -= learning_rate / (epsilon + np.sqrt(r3)) * self.W3_grad
+
+ elif mode == 'RMSProp':
+ # lr 1e-3, rho 0.999
+ epsilon = 1e-6
+ rho = h_params[0]
+            # read the accumulated sum of squared gradients
+ r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+            # update the exponential moving average of squared gradients
+ r1 = rho * r1 + (1 - rho) * np.square(self.W1_grad)
+ r2 = rho * r2 + (1 - rho) * np.square(self.W2_grad)
+ r3 = rho * r3 + (1 - rho) * np.square(self.W3_grad)
+            # store the accumulated sum of squared gradients
+ self.memory['r1'] = r1
+ self.memory['r2'] = r2
+ self.memory['r3'] = r3
+            # update the parameters
+ self.W1 -= learning_rate / (epsilon + np.sqrt(r1)) * self.W1_grad
+ self.W2 -= learning_rate / (epsilon + np.sqrt(r2)) * self.W2_grad
+ self.W3 -= learning_rate / (epsilon + np.sqrt(r3)) * self.W3_grad
+
+ elif mode == 'Adam':
+ # lr=1e-3, rho1=0.9, rho2=0.999
+ epsilon = 1e-8
+ rho1, rho2 = h_params[0], h_params[1]
+            # determine the current time step
+ t = self.memory.get('t', 0)
+ t += 1
+            # read the accumulated squared gradients and the momentum; both initialized to 0
+ s1, s2, s3 = self.memory.get('s1', 0), self.memory.get('s2', 0), self.memory.get('s3', 0)
+ r1, r2, r3 = self.memory.get('r1', 0), self.memory.get('r2', 0), self.memory.get('r3', 0)
+            # update the momentum
+ s1 = rho1 * s1 + (1 - rho1) * self.W1_grad
+ s2 = rho1 * s2 + (1 - rho1) * self.W2_grad
+ s3 = rho1 * s3 + (1 - rho1) * self.W3_grad
+            # store the momentum
+ self.memory['s1'] = s1
+ self.memory['s2'] = s2
+ self.memory['s3'] = s3
+            # update the accumulated squared gradients
+ r1 = rho2 * r1 + (1 - rho2) * np.square(self.W1_grad)
+ r2 = rho2 * r2 + (1 - rho2) * np.square(self.W2_grad)
+ r3 = rho2 * r3 + (1 - rho2) * np.square(self.W3_grad)
+            # store the accumulated squared gradients
+ self.memory['r1'] = r1
+ self.memory['r2'] = r2
+ self.memory['r3'] = r3
+            # bias-correct the momentum
+ s1_hat = s1 / (1 - np.power(rho1, t))
+ s2_hat = s2 / (1 - np.power(rho1, t))
+ s3_hat = s3 / (1 - np.power(rho1, t))
+            # bias-correct the squared-gradient accumulator
+ r1_hat = r1 / (1 - np.power(rho2, t))
+ r2_hat = r2 / (1 - np.power(rho2, t))
+ r3_hat = r3 / (1 - np.power(rho2, t))
+            # update the parameters
+ self.W1 -= learning_rate / (epsilon + np.sqrt(r1_hat)) * s1_hat
+ self.W2 -= learning_rate / (epsilon + np.sqrt(r2_hat)) * s2_hat
+ self.W3 -= learning_rate / (epsilon + np.sqrt(r3_hat)) * s3_hat
+
+
+def numpy_run(arg_list, neuron_shape=(256 * 1, 64 * 1), modify=None, noise=None):
+ """
+    Run training
+    :param modify: setting for the hidden-neuron experiments
+    :param neuron_shape: controls the number of hidden-layer neurons
+    :param noise: [add noise to the training data, add noise to the test data]
+    :param arg_list: optimizer type and hyperparameters, e.g. [1e-3, 'Adam', [0.9, 0.999]]
+    :return: the recorded accuracies or training losses, depending on the arguments
+ """
+    # unpack the hyperparameters
+ if noise is None:
+ noise = [False, False]
+ lr = arg_list[0]
+ mode = arg_list[1]
+ print(mode)
+ h_params = arg_list[-1]
+
+ train_dataset, test_dataset = download_mnist()
+ model = NumpyModel()
+ if modify == 'neuron':
+ model = NumpyModel_neuron(neuron_shape=neuron_shape)
+ elif modify == 'layer':
+ model = NumpyModel_layer(neuron_shape=neuron_shape)
+ numpy_loss = NumpyLoss()
+    # choice of initialization
+ # model.W1, model.W2, model.W3 = get_torch_initialization()
+ # model.W1, model.W2, model.W3 = get_numpy_initialization()
+ model.W1, model.W2, model.W3 = get_numpy_initialization(neuron_shape=neuron_shape)
+
+ train_loss = []
+ test_acc = []
+ test_acc_noise = []
+ test_loss = []
+
+ epoch_number = 3
+ learning_rate = lr
+
+ for epoch in range(epoch_number):
+        # choice of mini-batch implementation
+ # for x, y in mini_batch(train_dataset):
+ for x, y in mini_batch_numpy(train_dataset, noise = noise[0]):
+ # print(x.numpy().shape)
+ y = one_hot(y)
+
+            y_pred = model.forward(x)  # x is already a numpy array thanks to mini_batch_numpy
+            # y_pred = model.forward(x.numpy())  # enable this line when using the torch mini_batch
+ loss = numpy_loss.get_loss(y_pred, y)
+
+ model.backward(numpy_loss.backward())
+ model.optimize(learning_rate, mode, h_params)
+
+ train_loss.append(loss.item())
+
+ x, y = batch(test_dataset)[0]
+ # x = gaussian_noise(x)
+ y_pred = model.forward(x)
+ accuracy = np.mean((y_pred.argmax(axis=1) == y))
+ test_acc.append(accuracy)
+
+ if noise[1]:
+ x = gaussian_noise(x)
+ y_pred = model.forward(x)
+ accuracy = np.mean((y_pred.argmax(axis=1) == y))
+ test_acc_noise.append(accuracy)
+
+ y = one_hot(y)
+ loss = (-y_pred * y).sum(axis=1).mean()
+ test_loss.append(loss)
+ print('[{}] Accuracy: {:.4f}'.format(epoch, accuracy))
+
+ plot_curve(train_loss, str(neuron_shape))
+ if modify is not None:
+ return test_acc
+ elif noise[1]:
+ return test_acc, test_acc_noise
+ else:
+ return train_loss
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) > 1 and sys.argv[1] == 'v':
+        # visualize weight initializations
+ visualize_weight('normal', 'tanh')
+ visualize_weight('Xavier', 'tanh')
+ visualize_weight('Xavier', 'ReLU')
+ visualize_weight('kaiming', 'ReLU')
+ elif len(sys.argv) > 1 and sys.argv[1] == 'o':
+        # compare optimizers
+ cases = [
+ [0.1, 'SGD', None],
+ [0.1, 'momentum', [0.9]],
+ [1e-2, 'AdaGrad', None],
+ [1e-4, 'RMSProp', [0.999]],
+ [1e-3, 'Adam', [0.9, 0.999]]
+ ]
+
+ loss = []
+ for case in cases:
+ loss.append(numpy_run(case))
+ step = 1
+        # plot of the loss over steps
+ color = ['k', 'y', 'c', 'b', 'r']
+ line = ['-', '-', '--', '-.', ':']
+ optim = [case[1] for case in cases]
+ for i in range(5):
+ plt.subplot(3, 2, i + 1)
+ plt.plot(range(len(loss[i]))[::step], loss[i][::step], color=color[i], linestyle=line[i], label=optim[i])
+ plt.xlabel('step')
+ plt.ylabel('value')
+ plt.legend()
+ plt.show()
+
+        # plot on a logarithmic scale
+ for i in range(5):
+ plt.plot(range(len(loss[i]))[::step], np.log(loss[i][::step]) / np.log(100), color=color[i], linestyle=line[i],
+ label=optim[i])
+ plt.legend()
+ plt.xlabel('step')
+ plt.ylabel('log_value')
+ plt.show()
+
+ elif len(sys.argv) > 1 and (sys.argv[1] == 'm' or sys.argv[1] == 'd' or sys.argv[1] == 'f'):
+        # hidden-neuron-count experiment
+ if sys.argv[1] == 'm':
+ ns = (256 * 1, 64 * 1)
+        elif sys.argv[1] == 'd':
+ ns = (256 * 2, 64 * 2)
+        elif sys.argv[1] == 'f':
+ ns = (256 * 5, 64 * 5)
+ else:
+ raise ValueError("WRONG ARGV")
+
+ acc = numpy_run([0.1, 'SGD', None], neuron_shape=ns, modify='neuron')
+ plt.plot(range(len(acc)), acc)
+ plt.title(f"test acc {ns}")
+ plt.ylabel("acc")
+ plt.xlabel("epoch")
+ plt.show()
+ elif len(sys.argv) > 1 and (sys.argv[1] == 'g' or sys.argv[1] == 'n'):
+        # noise experiment
+ if sys.argv[1] == 'g':
+            test_acc, test_acc_noise = numpy_run([0.1, 'SGD', None], noise=[True, True])  # noise=[noisy training, noisy testing]
+ plt.plot(range(len(test_acc)), test_acc, linestyle='--', marker='+', color='k', label='test without noise')
+ plt.plot(range(len(test_acc_noise)), test_acc_noise, marker='o', color='y', label='test with noise')
+ plt.xlabel("epoch")
+ plt.ylabel("acc")
+ plt.title("train_with_noise")
+ plt.legend()
+ plt.show()
+ elif sys.argv[1] == 'n':
+ test_acc, test_acc_noise = numpy_run([0.1, 'SGD', None], noise=[False, True])
+ plt.plot(range(len(test_acc)), test_acc, linestyle='--', marker='+', color='k', label='test without noise')
+ plt.plot(range(len(test_acc_noise)), test_acc_noise, marker='o', color='y', label='test with noise')
+ plt.xlabel("epoch")
+ plt.ylabel("acc")
+ plt.title("train_without_noise")
+ plt.legend()
+ plt.show()
+ else:
+ numpy_run([0.1, 'SGD', None])