From 47dee485a4357dc69be10058e38171a73c73a194 Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Sat, 17 Apr 2021 14:32:32 +0800 Subject: [PATCH 1/8] finished Relu --- .../submission/18307130090/numpy_fnn.py | 151 +++++++++++++++ .../submission/18307130090/numpy_mnist.py | 36 ++++ .../submission/18307130090/tester_demo.py | 181 ++++++++++++++++++ .../submission/18307130090/torch_mnist.py | 64 +++++++ assignment-2/submission/18307130090/utils.py | 71 +++++++ 5 files changed, 503 insertions(+) create mode 100644 assignment-2/submission/18307130090/numpy_fnn.py create mode 100644 assignment-2/submission/18307130090/numpy_mnist.py create mode 100644 assignment-2/submission/18307130090/tester_demo.py create mode 100644 assignment-2/submission/18307130090/torch_mnist.py create mode 100644 assignment-2/submission/18307130090/utils.py diff --git a/assignment-2/submission/18307130090/numpy_fnn.py b/assignment-2/submission/18307130090/numpy_fnn.py new file mode 100644 index 0000000..f9a8056 --- /dev/null +++ b/assignment-2/submission/18307130090/numpy_fnn.py @@ -0,0 +1,151 @@ +import numpy as np + + +class NumpyOp: + + def __init__(self): + self.memory = {} + self.epsilon = 1e-12 + + +class Matmul(NumpyOp): + + def forward(self, x, W): + """ + x: shape(N, d) + w: shape(d, d') + """ + self.memory['x'] = x + self.memory['W'] = W + h = np.matmul(x, W) + return h + + def backward(self, grad_y): + """ + grad_y: shape(N, d') + """ + + #################### + # code 1 # + #################### + + return grad_x, grad_W + + +class Relu(NumpyOp): + + def forward(self, x): + self.memory['x'] = x + return np.where(x > 0, x, np.zeros_like(x)) + + def backward(self, grad_y): + """ + grad_y: same shape as x + """ + x=self.memory['x'] + grad_x=np.where(x>0,np.ones_like(x),np.zeros_like(x)) + + return grad_x + + +class Log(NumpyOp): + + def forward(self, x): + """ + x: shape(N, c) + """ + + out = np.log(x + self.epsilon) + self.memory['x'] = x + + return out + + def backward(self, grad_y): + """ + 
grad_y: same shape as x + """ + + #################### + # code 3 # + #################### + + return grad_x + + +class Softmax(NumpyOp): + """ + softmax over last dimension + """ + + def forward(self, x): + """ + x: shape(N, c) + """ + + #################### + # code 4 # + #################### + + return out + + def backward(self, grad_y): + """ + grad_y: same shape as x + """ + + #################### + # code 5 # + #################### + + return grad_x + + +class NumpyModel: + def __init__(self): + self.W1 = np.random.normal(size=(28 * 28, 256)) + self.W2 = np.random.normal(size=(256, 64)) + self.W3 = np.random.normal(size=(64, 10)) + + + # 以下算子会在 forward 和 backward 中使用 + self.matmul_1 = Matmul() + self.relu_1 = Relu() + self.matmul_2 = Matmul() + self.relu_2 = Relu() + self.matmul_3 = Matmul() + self.softmax = Softmax() + self.log = Log() + + # 以下变量需要在 backward 中更新 + self.x1_grad, self.W1_grad = None, None + self.relu_1_grad = None + self.x2_grad, self.W2_grad = None, None + self.relu_2_grad = None + self.x3_grad, self.W3_grad = None, None + self.softmax_grad = None + self.log_grad = None + + + def forward(self, x): + x = x.reshape(-1, 28 * 28) + + #################### + # code 6 # + #################### + + return x + + def backward(self, y): + for size in y.shape: + y /= size + + #################### + # code 7 # + #################### + + pass + + def optimize(self, learning_rate): + self.W1 -= learning_rate * self.W1_grad + self.W2 -= learning_rate * self.W2_grad + self.W3 -= learning_rate * self.W3_grad diff --git a/assignment-2/submission/18307130090/numpy_mnist.py b/assignment-2/submission/18307130090/numpy_mnist.py new file mode 100644 index 0000000..903d7a7 --- /dev/null +++ b/assignment-2/submission/18307130090/numpy_mnist.py @@ -0,0 +1,36 @@ +import numpy as np +from numpy_fnn import NumpyModel +from utils import download_mnist, batch, mini_batch, get_torch_initialization, plot_curve, one_hot + + +def numpy_run(): + train_dataset, test_dataset = 
download_mnist() + + model = NumpyModel() + model.W1, model.W2, model.W3 = get_torch_initialization() + + train_loss = [] + + epoch_number = 3 + learning_rate = 0.1 + + for epoch in range(epoch_number): + for x, y in mini_batch(train_dataset): + y = one_hot(y) + + y_pred = model.forward(x.numpy()) + loss = (-y_pred * y).sum(axis=1).mean() + model.backward(y) + model.optimize(learning_rate) + + train_loss.append(loss.item()) + + x, y = batch(test_dataset)[0] + accuracy = np.mean((model.forward(x).argmax(axis=1) == y)) + print('[{}] Accuracy: {:.4f}'.format(epoch, accuracy)) + + plot_curve(train_loss) + + +if __name__ == "__main__": + numpy_run() diff --git a/assignment-2/submission/18307130090/tester_demo.py b/assignment-2/submission/18307130090/tester_demo.py new file mode 100644 index 0000000..62744b4 --- /dev/null +++ b/assignment-2/submission/18307130090/tester_demo.py @@ -0,0 +1,181 @@ +import numpy as np +import torch +from torch import matmul as torch_matmul, relu as torch_relu, softmax as torch_softmax, log as torch_log + +from numpy_fnn import Matmul, Relu, Softmax, Log, NumpyModel +from torch_mnist import TorchModel +from utils import get_torch_initialization, one_hot + +err_epsilon = 1e-3 +err_p = 0.4 + + +def check_result(numpy_result, torch_result=None): + if isinstance(numpy_result, list) and torch_result is None: + flag = True + for (n, t) in numpy_result: + flag = flag and check_result(n, t) + return flag + T = (torch_result * torch.from_numpy(numpy_result) < 0).sum().item() + direction = T / torch_result.numel() < err_p + return direction and ((torch.from_numpy(numpy_result) - torch_result).abs().mean() < err_epsilon).item() + + +def case_1(): + x = np.random.normal(size=[5, 6]) + W = np.random.normal(size=[6, 4]) + + numpy_matmul = Matmul() + numpy_out = numpy_matmul.forward(x, W) + numpy_x_grad, numpy_W_grad = numpy_matmul.backward(np.ones_like(numpy_out)) + + torch_x = torch.from_numpy(x).clone().requires_grad_() + torch_W = 
torch.from_numpy(W).clone().requires_grad_() + + torch_out = torch_matmul(torch_x, torch_W) + torch_out.sum().backward() + + return check_result([ + (numpy_out, torch_out), + (numpy_x_grad, torch_x.grad), + (numpy_W_grad, torch_W.grad) + ]) + + +def case_2(): + x = np.random.normal(size=[5, 6]) + + numpy_relu = Relu() + numpy_out = numpy_relu.forward(x) + numpy_x_grad = numpy_relu.backward(np.ones_like(numpy_out)) + + torch_x = torch.from_numpy(x).clone().requires_grad_() + + torch_out = torch_relu(torch_x) + torch_out.sum().backward() + + return check_result([ + (numpy_out, torch_out), + (numpy_x_grad, torch_x.grad), + ]) + + +def case_3(): + x = np.random.uniform(low=0.0, high=1.0, size=[3, 4]) + + numpy_log = Log() + numpy_out = numpy_log.forward(x) + numpy_x_grad = numpy_log.backward(np.ones_like(numpy_out)) + + torch_x = torch.from_numpy(x).clone().requires_grad_() + + torch_out = torch_log(torch_x) + torch_out.sum().backward() + + return check_result([ + (numpy_out, torch_out), + (numpy_x_grad, torch_x.grad), + ]) + + +def case_4(): + x = np.random.normal(size=[4, 5]) + + numpy_softmax = Softmax() + numpy_out = numpy_softmax.forward(x) + + torch_x = torch.from_numpy(x).clone().requires_grad_() + + torch_out = torch_softmax(torch_x, 1) + + return check_result(numpy_out, torch_out) + + +def case_5(): + x = np.random.normal(size=[20, 25]) + + numpy_softmax = Softmax() + numpy_out = numpy_softmax.forward(x) + numpy_x_grad = numpy_softmax.backward(np.ones_like(numpy_out)) + + torch_x = torch.from_numpy(x).clone().requires_grad_() + + torch_out = torch_softmax(torch_x, 1) + torch_out.sum().backward() + + return check_result([ + (numpy_out, torch_out), + (numpy_x_grad, torch_x.grad), + ]) + + +def test_model(): + try: + numpy_model = NumpyModel() + torch_model = TorchModel() + torch_model.W1.data, torch_model.W2.data, torch_model.W3.data = get_torch_initialization(numpy=False) + numpy_model.W1 = torch_model.W1.detach().clone().numpy() + numpy_model.W2 = 
torch_model.W2.detach().clone().numpy() + numpy_model.W3 = torch_model.W3.detach().clone().numpy() + + x = torch.randn((10000, 28, 28)) + y = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 1000) + + y = one_hot(y, numpy=False) + x2 = x.numpy() + y_pred = torch_model.forward(x) + loss = (-y_pred * y).sum(dim=1).mean() + loss.backward() + + y_pred_numpy = numpy_model.forward(x2) + y2 = y.numpy() + loss_numpy = (-y_pred_numpy * y2).sum(axis=1).mean() + + check_flag_1 = check_result(y_pred_numpy, y_pred) + print("+ {:12} {}/{}".format("forward", 10 * check_flag_1, 10)) + except: + print("[Runtime Error in forward]") + print("+ {:12} {}/{}".format("forward", 0, 10)) + return 0 + + try: + + numpy_model.backward(y2) + + check_flag_2 = [ + check_result(numpy_model.log_grad, torch_model.log.grad), + check_result(numpy_model.softmax_grad, torch_model.softmax.grad), + check_result(numpy_model.W3_grad, torch_model.W3.grad), + check_result(numpy_model.W2_grad, torch_model.W2.grad), + check_result(numpy_model.W1_grad, torch_model.W1.grad) + ] + check_flag_2 = sum(check_flag_2) >= 4 + print("+ {:12} {}/{}".format("backward", 20 * check_flag_2, 20)) + except: + print("[Runtime Error in backward]") + print("+ {:12} {}/{}".format("backward", 0, 20)) + check_flag_2 = False + + return 10 * check_flag_1 + 20 * check_flag_2 + + +if __name__ == "__main__": + testcases = [ + ["matmul", case_1, 5], + ["relu", case_2, 5], + ["log", case_3, 5], + ["softmax_1", case_4, 5], + ["softmax_2", case_5, 10], + ] + score = 0 + for case in testcases: + # if case[0]!='relu':continue + try: + res = case[2] if case[1]() else 0 + except: + print("[Runtime Error in {}]".format(case[0])) + res = 0 + score += res + print("+ {:12} {}/{}".format(case[0], res, case[2])) + score += test_model() + print("{:14} {}/60".format("FINAL SCORE", score)) diff --git a/assignment-2/submission/18307130090/torch_mnist.py b/assignment-2/submission/18307130090/torch_mnist.py new file mode 100644 index 0000000..6a5649b --- 
/dev/null +++ b/assignment-2/submission/18307130090/torch_mnist.py @@ -0,0 +1,64 @@ +import torch +from utils import mini_batch, batch, download_mnist, get_torch_initialization, one_hot, plot_curve + + +class TorchModel: + + def __init__(self): + self.W1 = torch.randn((28 * 28, 256), requires_grad=True) + self.W2 = torch.randn((256, 64), requires_grad=True) + self.W3 = torch.randn((64, 10), requires_grad=True) + + def forward(self, x): + x = x.reshape(-1, 28 * 28) + x = torch.relu(torch.matmul(x, self.W1)) + x = torch.relu(torch.matmul(x, self.W2)) + x = torch.matmul(x, self.W3) + self.softmax = torch.softmax(x, 1) + self.log = torch.log(self.softmax) + self.softmax.retain_grad() # for test only + self.log.retain_grad() # for test only + return self.log + + def optimize(self, learning_rate): + with torch.no_grad(): + self.W1 -= learning_rate * self.W1.grad + self.W2 -= learning_rate * self.W2.grad + self.W3 -= learning_rate * self.W3.grad + + self.W1.grad = None + self.W2.grad = None + self.W3.grad = None + + +def torch_run(): + train_dataset, test_dataset = download_mnist() + + model = TorchModel() + model.W1.data, model.W2.data, model.W3.data = get_torch_initialization(numpy=False) + + train_loss = [] + + epoch_number = 3 + learning_rate = 0.1 + + for epoch in range(epoch_number): + for x, y in mini_batch(train_dataset, numpy=False): + y = one_hot(y, numpy=False) + + y_pred = model.forward(x) + loss = (-y_pred * y).sum(dim=1).mean() + loss.backward() + model.optimize(learning_rate) + + train_loss.append(loss.item()) + + x, y = batch(test_dataset, numpy=False)[0] + accuracy = model.forward(x).argmax(dim=1).eq(y).float().mean().item() + print('[{}] Accuracy: {:.4f}'.format(epoch, accuracy)) + + plot_curve(train_loss) + + +if __name__ == "__main__": + torch_run() diff --git a/assignment-2/submission/18307130090/utils.py b/assignment-2/submission/18307130090/utils.py new file mode 100644 index 0000000..709220c --- /dev/null +++ 
b/assignment-2/submission/18307130090/utils.py @@ -0,0 +1,71 @@ +import torch +import numpy as np +from matplotlib import pyplot as plt + + +def plot_curve(data): + plt.plot(range(len(data)), data, color='blue') + plt.legend(['loss_value'], loc='upper right') + plt.xlabel('step') + plt.ylabel('value') + plt.show() + + +def download_mnist(): + from torchvision import datasets, transforms + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=(0.1307,), std=(0.3081,)) + ]) + + train_dataset = datasets.MNIST(root="./data/", transform=transform, train=True, download=True) + test_dataset = datasets.MNIST(root="./data/", transform=transform, train=False, download=True) + + return train_dataset, test_dataset + + +def one_hot(y, numpy=True): + if numpy: + y_ = np.zeros((y.shape[0], 10)) + y_[np.arange(y.shape[0], dtype=np.int32), y] = 1 + return y_ + else: + y_ = torch.zeros((y.shape[0], 10)) + y_[torch.arange(y.shape[0], dtype=torch.long), y] = 1 + return y_ + + +def batch(dataset, numpy=True): + data = [] + label = [] + for each in dataset: + data.append(each[0]) + label.append(each[1]) + data = torch.stack(data) + label = torch.LongTensor(label) + if numpy: + return [(data.numpy(), label.numpy())] + else: + return [(data, label)] + + +def mini_batch(dataset, batch_size=128, numpy=False): + return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True) + + +def get_torch_initialization(numpy=True): + fc1 = torch.nn.Linear(28 * 28, 256) + fc2 = torch.nn.Linear(256, 64) + fc3 = torch.nn.Linear(64, 10) + + if numpy: + W1 = fc1.weight.T.detach().clone().numpy() + W2 = fc2.weight.T.detach().clone().numpy() + W3 = fc3.weight.T.detach().clone().numpy() + else: + W1 = fc1.weight.T.detach().clone().data + W2 = fc2.weight.T.detach().clone().data + W3 = fc3.weight.T.detach().clone().data + + return W1, W2, W3 -- Gitee From b812f9c1c2c7455973c8f1a72e035023a4c9f970 Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Tue, 27 Apr 2021 
15:40:04 +0800 Subject: [PATCH 2/8] operator finished --- .../submission/18307130090/numpy_fnn.py | 60 +++++++++++-------- .../submission/18307130090/numpy_mnist.py | 8 ++- .../submission/18307130090/tester_demo.py | 17 +++--- .../submission/18307130090/torch_mnist.py | 19 ++++-- 4 files changed, 62 insertions(+), 42 deletions(-) diff --git a/assignment-2/submission/18307130090/numpy_fnn.py b/assignment-2/submission/18307130090/numpy_fnn.py index f9a8056..9d03206 100644 --- a/assignment-2/submission/18307130090/numpy_fnn.py +++ b/assignment-2/submission/18307130090/numpy_fnn.py @@ -24,10 +24,9 @@ class Matmul(NumpyOp): """ grad_y: shape(N, d') """ - - #################### - # code 1 # - #################### + x, W = self.memory['x'], self.memory['W'] + grad_x = np.matmul(grad_y, W.T) + grad_W = np.matmul(x.T, grad_y) return grad_x, grad_W @@ -42,8 +41,8 @@ class Relu(NumpyOp): """ grad_y: same shape as x """ - x=self.memory['x'] - grad_x=np.where(x>0,np.ones_like(x),np.zeros_like(x)) + x = self.memory['x'] + grad_x = np.where(x > 0, np.ones_like(x), np.zeros_like(x)) return grad_x @@ -64,10 +63,8 @@ class Log(NumpyOp): """ grad_y: same shape as x """ - - #################### - # code 3 # - #################### + x=self.memory['x'] + grad_x=grad_y*np.reciprocal(x+self.epsilon) return grad_x @@ -81,10 +78,11 @@ class Softmax(NumpyOp): """ x: shape(N, c) """ - - #################### - # code 4 # - #################### + exp_x=np.exp(x) + exp_sum=np.sum(exp_x,axis=1,keepdims=True) + out=exp_x/exp_sum + self.memory['x']=x + self.memory['out']=out return out @@ -92,21 +90,34 @@ class Softmax(NumpyOp): """ grad_y: same shape as x """ - - #################### - # code 5 # - #################### + sm = self.memory['out'] + Jacobs = np.array([np.diag(r) -np.outer(r,r) for r in sm]) + + grad_y = grad_y[:, np.newaxis, :] + grad_x = np.matmul(grad_y, Jacobs).squeeze(axis=1) return grad_x +class NumpyLoss: + + def __init__(self): + self.target = None + + def get_loss(self, pred, 
target): + self.target = target + return (-pred * target).sum(axis=1).mean() + + def backward(self): + return -self.target / self.target.shape[0] + + class NumpyModel: def __init__(self): self.W1 = np.random.normal(size=(28 * 28, 256)) self.W2 = np.random.normal(size=(256, 64)) self.W3 = np.random.normal(size=(64, 10)) - - + # 以下算子会在 forward 和 backward 中使用 self.matmul_1 = Matmul() self.relu_1 = Relu() @@ -115,8 +126,8 @@ class NumpyModel: self.matmul_3 = Matmul() self.softmax = Softmax() self.log = Log() - - # 以下变量需要在 backward 中更新 + + # 以下变量需要在 backward 中更新。 softmax_grad, log_grad 等为算子反向传播的梯度( loss 关于算子输入的偏导) self.x1_grad, self.W1_grad = None, None self.relu_1_grad = None self.x2_grad, self.W2_grad = None, None @@ -124,7 +135,6 @@ class NumpyModel: self.x3_grad, self.W3_grad = None, None self.softmax_grad = None self.log_grad = None - def forward(self, x): x = x.reshape(-1, 28 * 28) @@ -136,9 +146,7 @@ class NumpyModel: return x def backward(self, y): - for size in y.shape: - y /= size - + #################### # code 7 # #################### diff --git a/assignment-2/submission/18307130090/numpy_mnist.py b/assignment-2/submission/18307130090/numpy_mnist.py index 903d7a7..c18db94 100644 --- a/assignment-2/submission/18307130090/numpy_mnist.py +++ b/assignment-2/submission/18307130090/numpy_mnist.py @@ -1,5 +1,5 @@ import numpy as np -from numpy_fnn import NumpyModel +from numpy_fnn import NumpyModel, NumpyLoss from utils import download_mnist, batch, mini_batch, get_torch_initialization, plot_curve, one_hot @@ -7,6 +7,7 @@ def numpy_run(): train_dataset, test_dataset = download_mnist() model = NumpyModel() + numpy_loss = NumpyLoss() model.W1, model.W2, model.W3 = get_torch_initialization() train_loss = [] @@ -19,8 +20,9 @@ def numpy_run(): y = one_hot(y) y_pred = model.forward(x.numpy()) - loss = (-y_pred * y).sum(axis=1).mean() - model.backward(y) + loss = numpy_loss.get_loss(y_pred, y) + + model.backward(numpy_loss.backward()) model.optimize(learning_rate) 
train_loss.append(loss.item()) diff --git a/assignment-2/submission/18307130090/tester_demo.py b/assignment-2/submission/18307130090/tester_demo.py index 62744b4..504b3ee 100644 --- a/assignment-2/submission/18307130090/tester_demo.py +++ b/assignment-2/submission/18307130090/tester_demo.py @@ -2,11 +2,11 @@ import numpy as np import torch from torch import matmul as torch_matmul, relu as torch_relu, softmax as torch_softmax, log as torch_log -from numpy_fnn import Matmul, Relu, Softmax, Log, NumpyModel +from numpy_fnn import Matmul, Relu, Softmax, Log, NumpyModel, NumpyLoss from torch_mnist import TorchModel from utils import get_torch_initialization, one_hot -err_epsilon = 1e-3 +err_epsilon = 1e-6 err_p = 0.4 @@ -16,6 +16,7 @@ def check_result(numpy_result, torch_result=None): for (n, t) in numpy_result: flag = flag and check_result(n, t) return flag + # print((torch.from_numpy(numpy_result) - torch_result).abs().mean().item()) T = (torch_result * torch.from_numpy(numpy_result) < 0).sum().item() direction = T / torch_result.numel() < err_p return direction and ((torch.from_numpy(numpy_result) - torch_result).abs().mean() < err_epsilon).item() @@ -74,6 +75,7 @@ def case_3(): return check_result([ (numpy_out, torch_out), + (numpy_x_grad, torch_x.grad), ]) @@ -111,6 +113,7 @@ def case_5(): def test_model(): try: + numpy_loss = NumpyLoss() numpy_model = NumpyModel() torch_model = TorchModel() torch_model.W1.data, torch_model.W2.data, torch_model.W3.data = get_torch_initialization(numpy=False) @@ -128,8 +131,7 @@ def test_model(): loss.backward() y_pred_numpy = numpy_model.forward(x2) - y2 = y.numpy() - loss_numpy = (-y_pred_numpy * y2).sum(axis=1).mean() + numpy_loss.get_loss(y_pred_numpy, y.numpy()) check_flag_1 = check_result(y_pred_numpy, y_pred) print("+ {:12} {}/{}".format("forward", 10 * check_flag_1, 10)) @@ -140,11 +142,11 @@ def test_model(): try: - numpy_model.backward(y2) + numpy_model.backward(numpy_loss.backward()) check_flag_2 = [ - 
check_result(numpy_model.log_grad, torch_model.log.grad), - check_result(numpy_model.softmax_grad, torch_model.softmax.grad), + check_result(numpy_model.log_grad, torch_model.log_input.grad), + check_result(numpy_model.softmax_grad, torch_model.softmax_input.grad), check_result(numpy_model.W3_grad, torch_model.W3.grad), check_result(numpy_model.W2_grad, torch_model.W2.grad), check_result(numpy_model.W1_grad, torch_model.W1.grad) @@ -169,7 +171,6 @@ if __name__ == "__main__": ] score = 0 for case in testcases: - # if case[0]!='relu':continue try: res = case[2] if case[1]() else 0 except: diff --git a/assignment-2/submission/18307130090/torch_mnist.py b/assignment-2/submission/18307130090/torch_mnist.py index 6a5649b..6d3e214 100644 --- a/assignment-2/submission/18307130090/torch_mnist.py +++ b/assignment-2/submission/18307130090/torch_mnist.py @@ -8,17 +8,26 @@ class TorchModel: self.W1 = torch.randn((28 * 28, 256), requires_grad=True) self.W2 = torch.randn((256, 64), requires_grad=True) self.W3 = torch.randn((64, 10), requires_grad=True) + self.softmax_input = None + self.log_input = None def forward(self, x): x = x.reshape(-1, 28 * 28) x = torch.relu(torch.matmul(x, self.W1)) x = torch.relu(torch.matmul(x, self.W2)) x = torch.matmul(x, self.W3) - self.softmax = torch.softmax(x, 1) - self.log = torch.log(self.softmax) - self.softmax.retain_grad() # for test only - self.log.retain_grad() # for test only - return self.log + + self.softmax_input = x + self.softmax_input.retain_grad() + + x = torch.softmax(x, 1) + + self.log_input = x + self.log_input.retain_grad() + + x = torch.log(x) + + return x def optimize(self, learning_rate): with torch.no_grad(): -- Gitee From 8b94fbafe93be5b8e34d6fcf16ff8c83de21e051 Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Wed, 28 Apr 2021 09:56:34 +0800 Subject: [PATCH 3/8] assignment-2 finished --- assignment-2/submission/18307130090/README.md | 282 ++++++++++++++++++ .../submission/18307130090/img/Adam.png | Bin 0 -> 17773 bytes 
.../submission/18307130090/img/SGDM.png | Bin 0 -> 18198 bytes .../18307130090/img/SGD_batch_size.png | Bin 0 -> 18628 bytes .../18307130090/img/SGD_learning_rate.png | Bin 0 -> 19947 bytes .../submission/18307130090/img/SGD_normal.png | Bin 0 -> 17942 bytes .../submission/18307130090/img/fnn_model.png | Bin 0 -> 20403 bytes .../submission/18307130090/numpy_fnn.py | 174 ++++++++--- .../submission/18307130090/numpy_mnist.py | 34 ++- 9 files changed, 432 insertions(+), 58 deletions(-) create mode 100644 assignment-2/submission/18307130090/README.md create mode 100644 assignment-2/submission/18307130090/img/Adam.png create mode 100644 assignment-2/submission/18307130090/img/SGDM.png create mode 100644 assignment-2/submission/18307130090/img/SGD_batch_size.png create mode 100644 assignment-2/submission/18307130090/img/SGD_learning_rate.png create mode 100644 assignment-2/submission/18307130090/img/SGD_normal.png create mode 100644 assignment-2/submission/18307130090/img/fnn_model.png diff --git a/assignment-2/submission/18307130090/README.md b/assignment-2/submission/18307130090/README.md new file mode 100644 index 0000000..b4594b3 --- /dev/null +++ b/assignment-2/submission/18307130090/README.md @@ -0,0 +1,282 @@ +# PRML-2021 Assignment2 + +姓名:夏海淞 + +学号:18307130090 + +## 简述 + +在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了SGD-Momentum和Adam优化算法,并比较了它们的性能。 + +## 算子的反向传播 + +### `Matmul` + +`Matmul`的计算公式为: +$$ +Y=X\times W +$$ +其中$Y,X,W$分别为$n\times d',n\times d,d\times d'$的矩阵。 + +由[神经网络与深度学习-邱锡鹏](https://nndl.github.io/nndl-book.pdf)中公式(B.20)和(B.21),有 +$$ +\frac{\part Y}{\part W}=\frac{\part(X\times W)}{\part W}=X^T\\ +\frac{\part Y}{\part X}=\frac{\part(X\times W)}{\part X}=W^T +$$ +结合链式法则和矩阵运算法则,可得 +$$ +\nabla_X=\nabla_Y\times W^T\\ +\nabla_W=X^T\times \nabla_Y +$$ + +### `Relu` + +`Relu`的计算公式为: +$$ +Y_{ij}=\begin{cases} +X_{ij}&X_{ij}\ge0\\ +0&\text{otherwise} +\end{cases} +$$ +因此有 +$$ +\frac{\part 
Y_{ij}}{\part X_{ij}}=\begin{cases} +1&X_{ij}>0\\ +0&\text{otherwise} +\end{cases} +$$ +结合链式法则,可得 +$$ +{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\part Y_{ij}}{\part X_{ij}} +$$ + +### `Log` + +`Log`的计算公式为 +$$ +Y_{ij}=\ln(X_{ij}+\epsilon),\epsilon=10^{-12} +$$ +因此有 +$$ +\frac{\part Y_{ij}}{\part X_{ij}}=\frac1{X_{ij}+\epsilon} +$$ +结合链式法则,可得 +$$ +{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\part Y_{ij}}{\part X_{ij}} +$$ + +### `Softmax` + +`Softmax`的计算公式为 +$$ +Y_{ij}=\frac{\exp\{X_{ij} \}}{\sum_{k=1}^c\exp\{X_{ik} \}} +$$ +其中$Y,X$均为$N\times c$的矩阵。容易发现`Softmax`以$X$的每行作为单位进行运算。因此对于$X,Y$的行分量$X_k,Y_k$,有 +$$ +\frac{\part Y_{ki}}{\part X_{kj}}=\begin{cases} +\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\ +-\frac{\exp\{X_{ki} \}\exp\{X_{kj} \}}{(\sum_t\exp\{X_{kt}\})^2}=-Y_{ki}Y_{kj}&i\not=j +\end{cases} +$$ +因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\part Y_{ki}}{\part X_{kj}}$。结合链式法则,可得 +$$ +\nabla_X=\nabla_Y\times J +$$ +将行分量组合起来,就得到了反向传播的最终结果。 + +## 模型构建与训练 + +### 模型构建 + +#### `forward` + +参考`torch_mnist.py`中`TorchModel`方法的模型,使用如下代码构建: + +```python +def forward(self, x): + x = x.reshape(-1, 28 * 28) + + x = self.relu_1.forward(self.matmul_1.forward(x, self.W1)) + x = self.relu_2.forward(self.matmul_2.forward(x, self.W2)) + x = self.matmul_3.forward(x, self.W3) + + x = self.log.forward(self.softmax.forward(x)) + + return x +``` + +模型的计算图如下: + +![](./img/fnn_model.png) + +#### `backward` + +根据模型的计算图,按照反向的计算顺序依次调用对应算子的反向传播算法即可。 + +```python +def backward(self, y): + self.log_grad = self.log.backward(y) + self.softmax_grad = self.softmax.backward(self.log_grad) + self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad) + self.relu_2_grad = self.relu_2.backward(self.x3_grad) + self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad) + self.relu_1_grad = self.relu_1.backward(self.x2_grad) + self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad) + + return self.x1_grad +``` 
+ +#### `mini_batch` + +`mini_batch`的作用是提高模型的训练速度,同时得到较好的优化效果。传统的批处理方法对整个数据集计算平均的损失函数值,随后计算相应梯度进行反向传播。当训练数据集容量较大时,对训练速度造成严重影响;而随机方法则对数据集的每个样本计算损失函数值,随后计算相应梯度进行反向传播。此时数据集容量不对训练速度产生影响,然而由于样本的随机性,可能导致参数无法收敛到最优值,在最优值附近震荡。因此一个折中的方法是将数据集划分为若干批次,在提高训练速度的同时保证了较好的收敛效果。 + +在本次实验中,我参照`utils.py`中的`mini_batch`,在`numpy_mnist.py`中重新实现了`mini_batch`方法: + +```python +def mini_batch(dataset, batch_size=128): + data = np.array([np.array(each[0]) for each in dataset]) + label = np.array([each[1] for each in dataset]) + + size = data.shape[0] + index = np.arange(size) + np.random.shuffle(index) + + return [(data[index[i:i + batch_size]], label[index[i:i + batch_size]]) for i in range(0, size, batch_size)] +``` + +### 模型训练 + +设定`learning_rate=0.1`,`batch_size=128`,`epoch_number=10`。训练结果如下: + +``` +[0] Accuracy: 0.9486 +[1] Accuracy: 0.9643 +[2] Accuracy: 0.9724 +[3] Accuracy: 0.9738 +[4] Accuracy: 0.9781 +[5] Accuracy: 0.9768 +[6] Accuracy: 0.9796 +[7] Accuracy: 0.9802 +[8] Accuracy: 0.9800 +[9] Accuracy: 0.9796 +``` + + + +尝试缩减`batch_size`的大小,设定`batch_size=64`。训练结果如下: + +``` +[0] Accuracy: 0.9597 +[1] Accuracy: 0.9715 +[2] Accuracy: 0.9739 +[3] Accuracy: 0.9771 +[4] Accuracy: 0.9775 +[5] Accuracy: 0.9803 +[6] Accuracy: 0.9808 +[7] Accuracy: 0.9805 +[8] Accuracy: 0.9805 +[9] Accuracy: 0.9716 +``` + + + +尝试降低`learning_rate`,设定`learning_rate=0.01`。训练结果如下: + +``` +[0] Accuracy: 0.8758 +[1] Accuracy: 0.9028 +[2] Accuracy: 0.9143 +[3] Accuracy: 0.9234 +[4] Accuracy: 0.9298 +[5] Accuracy: 0.9350 +[6] Accuracy: 0.9397 +[7] Accuracy: 0.9434 +[8] Accuracy: 0.9459 +[9] Accuracy: 0.9501 +``` + + + +根据实验结果,可以得出以下结论: + +当学习率和批处理容量合适时,参数的收敛速度随着学习率的减小而减小,而参数的震荡幅度随着批处理容量的减小而增大。 + +## 梯度下降算法的改进 + +传统的梯度下降算法可以表述为: +$$ +w_{t+1}=w_t-\eta\cdot\nabla f(w_t) +$$ +尽管梯度下降作为优化算法被广泛使用,它依然存在一些缺点,主要表现为: + +- 参数修正方向完全由当前梯度决定,导致当学习率过高时参数可能在最优点附近震荡; +- 学习率无法随着训练进度改变,导致训练前期收敛速度较慢,后期可能无法收敛。 + +针对上述缺陷,产生了许多梯度下降算法的改进算法。其中较为典型的是`Momentum`算法和`Adam`算法。 + +### `Momentum` + +针对“参数修正方向完全由当前梯度决定”的问题,`Momentum`引入了“动量”的概念。 + 
+类比现实世界,当小球从高处向低处滚动时,其运动方向不仅与当前位置的“陡峭程度”相关,也和当前的速度,即先前位置的“陡峭程度”相关。因此在`Momentum`算法中,参数的修正值不是取决于当前梯度,而是取决于梯度的各时刻的指数移动平均值: +$$ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\ +w_{t+1}=w_t-\eta\cdot m_t +$$ +指数移动平均值反映了参数调整时的“惯性”。当参数调整方向正确时,`Momentum`有助于加快训练速度,减少震荡的幅度;然而当参数调整方向错误时,`Momentum`会因为无法及时调整方向造成性能上的部分损失。 + +使用`Momentum`算法的训练结果如下: + +``` +[0] Accuracy: 0.9444 +[1] Accuracy: 0.9627 +[2] Accuracy: 0.9681 +[3] Accuracy: 0.9731 +[4] Accuracy: 0.9765 +[5] Accuracy: 0.9755 +[6] Accuracy: 0.9768 +[7] Accuracy: 0.9790 +[8] Accuracy: 0.9794 +[9] Accuracy: 0.9819 +``` + + + +可以看出相较传统的梯度下降算法并无明显优势。 + +### `Adam` + +针对“学习率无法随着训练进度改变”的问题,`Adam`在`Momentum`的基础上引入了“二阶动量”的概念。 + +`Adam`的改进思路为:由于神经网络中存在大量参数,不同参数的调整频率存在差别。对于频繁更新的参数,我们希望适当降低其学习率,提高收敛概率;而对于其他参数,我们希望适当增大其学习率,加快收敛速度。同时,参数的调整频率可能发生动态改变,我们也希望学习率能够随之动态调整。 + +因为参数的调整值与当前梯度直接相关,因此取历史梯度的平方和作为衡量参数调整频率的标准。如果历史梯度平方和较大,表明参数被频繁更新,需要降低其学习率。因此梯度下降算法改写为: +$$ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\ +V_t=V_{t-1}+\nabla^2f(w_t)\\ +w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t +$$ +然而,由于$V_t$关于$t$单调递增,可能导致训练后期学习率过低,参数无法收敛至最优。因此将$V_t$也改为指数移动平均值,避免了上述缺陷: +$$ +m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\ +V_t=\beta_2\cdot V_{t-1}+(1-\beta_2)\cdot\nabla^2f(w_t)\\ +w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t +$$ +使用`Adam`算法的训练结果如下: + +``` +[0] Accuracy: 0.9657 +[1] Accuracy: 0.9724 +[2] Accuracy: 0.9759 +[3] Accuracy: 0.9769 +[4] Accuracy: 0.9788 +[5] Accuracy: 0.9778 +[6] Accuracy: 0.9775 +[7] Accuracy: 0.9759 +[8] Accuracy: 0.9786 +[9] Accuracy: 0.9779 +``` + + + +可以看出相较传统的梯度下降算法,损失函数值的震荡幅度有所减小,而收敛速度与传统方法相当。 \ No newline at end of file diff --git a/assignment-2/submission/18307130090/img/Adam.png b/assignment-2/submission/18307130090/img/Adam.png new file mode 100644 index 0000000000000000000000000000000000000000..fe0326ebad52ad9356bdd7410834d9d61e9e5152 GIT binary patch literal 17773 zcmeHv`6JX{^!I!06$#lAipr;KMfPP>mP)8(UsK3VvM)0#Aw{xfj}~N4ma$GEyCK`y 
z$ugF)8~gIyN%fh}^L?J@FL?T)AKr7`_kHiV=e*9j=e~}ho9Zfb2ag?uAc*eTRmIy7 zM23VQ*f0$lcv6zDnh!xr7q2N^y5nv5=KuPppNMbq(r0uMe;K^{y=89QC8m2&C;*=HGt&aT!z15p{< z`Hn}kP2O+~Ia#?*x%_;i#=Y)=_JMFl@YxF0HW>&~z%!sBNLK+OhfHBGS_nx=aUOzL zj{N_Je-%>CjE;$kVP|LeTE+Jqf&wj5UI%*$eJU}Y994Npnah@S6jDezqGcq!vdD`f z)3v>j4r`bA06|Vlvp0i2WwUOshZ({IzTcpIv#yf_g97;v+{7F(g%q~0AwAWB-hXP4 zbvTo;K}QgbZxfl&&`kwBJw26>;~V68r@C@=F`J(>0*QhOmyXMOyMKE|Hx($Cmsr!= z)rHu9QMR$E$uQ5XX1X(D4d3IuFnq};hwfRR3qSE~Gdr8L{;R*fgtX-0s7~zhBd1E1KETj*$$KzGX0vxupaiR!i%RyefzUr5U@Rk{qadFqjbK) zV7|b5qA4Z?J2zNvkaX+B-EP<46FJ0jpzsLV={a${D&@z56pryc$nu_izSbNQ(~-;N z+zPi301-)8nN4@L{XmIU+RfxU>9_6r3XKY^BlS(e;y9%r9ohMy8g1W^))sMAv!hC4 z(EZ+cL!@eyfXC)kYABan?@p|}ph02T-tJaHKwpuAuyNTsz9+wC#lWGQ5M8ZCG}8orO07ucd1d>q|AM>x4;IzF~$^`;Ip-AH(0h23g0dE-PshbSpPX1 z3hy%Ry5POO0EA?U-W?OSeY3pqQeyCpw96;=p81L`^|6&m9pm(Dj3WX*1N znuGI1)2)Qcoz*xjJr~@!?2Uczm#NCVoq0pd0gCLB=}(DrUe*bB(~XS7W~P&Ada1eG zPpt90u6wvhwL06!TIU9JXBn)OuMIj|$nCC={;Wv|I7}wVpa?gmm;YxE-jSBPMt=32 z|9K>zMxub((q!8^^-AnlpOrQh@}2;Xn#@B&#ip%qF9ov*-gd4@%((i1;pNWOnwy!7 z@GRJxfzqNkM@m1XJb#{G)d@%JZXf29?f~`$HV^DROMaA~0ZW|;G(S{nBEUO%CFUO3 zs!!Tk(|?vO?)hbVueG9lkp4va(W)!03}m|4FIMeox86y+%^9XB21OftUUPYF)R%X! 
zcJNWV+G*9OGN9aezGt3m18y3!i`Bd;A>e-|Glag&a|-4JU}x{JQpOeD8!J=oN`6MU zrk75*LBVUSnR=qQ4~agjddz5pB75e>GO#HbMv7<|RcV@G#Une&mz@0M>)6`k(Ca(c ztGTDEPEHx)ImU?-?UGTP z(bh@5YPusmoTkp9V#8vj${#qAi+UBKbE;b08CM|$@E0)G9Mx!{6z5^IAGgm+7mGS>1c}&D8JwMu0?DTG$L8di5Jss?lNvUfVb}!qc(iaP{@L!iz ztzL~4vw23(ZJcMO#D_HTnib27To~Hhbpx&$%qq+!=ecr7xa`QbhY&!66+*u>88)tH zC<^AiG5%+=Z9-ShnN*ZrEfBZsm5e?);DJkUndwqfRn55l;km8?{y6n#jJq{9H(B@d z?GM*^P~W(=T;hOJq$RPMc;}Ta151<{kI<~={z!&SVI}AeKKu7p_n_hMyr7s@S|vNZj!sR=CVez#2p%PXE&uzX#h@&B6c2Xv z@1uzTZZKAM7Re)vU2k@@n9cMK1i_W|SGY072Oe1i{rr8TmkYMDo)4(x-$!4rQaKh_ zlddO9APi>s;W_DgKB$^QN$x46k5(SBqa)7&%lLPwyvtOM{s&2i($0Df^R7BT`lu@t zj9!>YI#hv;ITU^N9qDp*JlWA7uKyjK{{?J&EL$w!H#Fj(TW0mIPj6mEDZx$S#6j9=Y6AAQ zq>2q~52840hC4{FHeV>b1#mUnSLDW(=r-t4?zQIHOwYRs(}WqOJpHr6k}58AhE5Jk zh9o}QE=#|_*n3e4H}dw<6M#_@vE0flsgeNsA=WyIr8EMw}{{1Ba?i#xjJi*Z_x}OZAvtx9(VFVz85-9 z0_1b1+_2+JAf@B+0duaf?mXv{2$pCh{b+JnmKg2m`R>~F@Q@c@W%zfx7myAha4(HF zV!)CbFHqBS-N`R;nGpvOMNH1JCH`kkFnn)&xiMOpTh=`n9(^($Qa}45gARIq^_NcH zoXQThdGh@Q`#TE&ke^gSxmsS2)`sQ)K)1HG9{v7u2gOj^l?8HrEGzIPA=K+ro3Oq@ zK{m9W-lhPAbV$j;bupj^ZHGYOiS+sH&aQYGDo2Zt@`s?|IHFc5awXIuZn;kwGHCtG zrl&n84BzZfNlCtrffMu~Xy~`k_n<`C&?+I5;FMDlw4Y(++vuD}(sOrN!XR&^-#EKw z50YSf@V>w(#X|BG(wX3Um}rOLyR9T|D9dV5F65PJl0yQ4`)@68&3}dD9Tk!&p#`QLrhHG`0Xzt?#9I1Sapd?l%j^MdJlX9 zvdEt3#1$8AV`h1fa+(6^FJtp)X8imQCf!XEvkhjkp29tgLBCCb^^(A&S`<=*v622xF?8@kz~~I z1`%Ym5b17Z+y)`~_38hP$|_8{TN(O5uPI8TBAuUzaQl0%J=gf^A>-nkBwK+W3pa;^ zb&1i{``&NumzU5ECm~1&)`V@hYmg2rW)12DzlPO0NjKb+Z|OGKIs$?U%Z%HP_t!S@ zwxX6TH|p&gqXYr!v$N;MikU;0q?|ur(Yi|R4$@b8`1V3|5K|}znqMco|KSY_i;opI z?UkuAwFTMH@ZB^Laja86G+6FcB+u&res{G4zib?K?kjY3Mcs+1(OR7p>7XuI<(VucW zSK8%1goz_27~WT)TEnMMm!bD!jl__5{vgK_xnWK9lSmj^giLZ`i5tDMNss}EOc?Bc zL5odKZGd*?Wd8U=cJ+bbMxoM7XIzV?16UdUOFI}i0iLw~;sKV#a=OBX2qsf~W z@|;GM+u){XBse0cn}g_96Dq&?fH2(&4#B9cxl(YXk_K_tFo&_K4w()mimLqC%Zyi4 zj+1KQo-30lH)|?EXuRDhyWAuSf|8)#=lkIFD1;h&ol~*dKp-0d8&cw84&j_y#2_?1 z(M|m?BJWk#ArpM10lG!8?|XV@1|Ru)oCB#hOm|1awh7&&rf?2vi6ONWn^uq)W$KZN 
zSjJx>febp11}ITV^UuTq9%SC)Wk+umPlHMb|8LNE$pLU>N%fB&_i?4Rbmla4;@NLV zh`so06ZTOj|D&2Ygjn<^@hH7~3lSuI&yeIp_$E7DKL^CMS z0_7N&d-i^@0(r#Ic94o}+H!;4KP2=DChPPo1trc`$Ci8@oI`B}N?gKtl%BH) zmblL9x3%Y6V!Yf#Fsi0{UJ5m6-p2r5K!v?Rvk|*VX-FP?&kpG>^YO?G~?( zR3#gGZsjgOB5H18zWi(EHR8w&t&Bp)k;@lGD00_}Zp4UeKJ>jHVoJnkOMpAfeGm(( z6r@OE$z8~r|O@K!!vDQP0=P&(HH-90_dpuG{eyNVv4-Xj^*&A6{Oc1(F z5VHl{(~TQ7bx0%cJlf&K(Z6tO#(@UjldfN2-B)O*si~P$I#=?Hjzc>`+palQY=0VA z`t{K6(B5BD2k%sYB1xkA&jWd*{HxEneb+~t<0NKO*XzIs9tP14JB|J;%!T%U9tPtD z`ydVWAsb7zfR~-zpNnTY?eN;rFYF;k-RE`?Ykk(_h5fIBQz!U!u>H@LgJ0*JdO+C% zmxS;g^uRU0xp3u|_A^~<*wOP9e_;b?>#&$jS0>1@4dw~KVX#uX2oNYErv1O{Y1|DG zHmQ&uoPVkWJnvTmkViMHBy@;XXd9o80lIJ*@n`+5FJBbxaM93T!vnL(V9M#o-7O%V zwkc-z7vco0vuN-K$y6O2dqCNR8?l`%JR;0X-u3Y%?Xbj92Z;o$IKXN<19tSi+P@h@ z%_-!bm+U6fw%L6&F+FPmj>dlm=6zi;HPQ3aE(Otg_;Vs)vKBA?__EFovf@?|5!2b; zdWcI-|BodQfy2j(9qmvC%2E5vW?qI`>gw1om{pV0%Q}VzSG)oiq#JNp1ZjdW`tu&J zAX$K+KR&2RSp9hKzW5VtY|hQ(%NtAM!sRQs_YF8Bo9DM%5w{bCOO$!gdg8*ZLff7K zo9?;3qMUoRA)t)Z5xoqw55i-w&0oOFqcaUF1)IabWH#hB_2Yw?8K}>=On0=gEWZqc z&0s-tkdgDRK7M&dRAQpgnPcL%k{(NA3a(J?R`i8O)2GPSBX$0@G7fZtw2P;hm(F6v z)||}FN_&#~&mdp{r2ymi?McT(kbHeSn$+?^wbkg$gRnQ)N(5qlupHDZJ)6b4ZLztR z_6^I~k{$g{nRqt*vFmIvp@Bc=3pji{XiIr{OzfD*{kl!U48fnc<=N%hR11L& zf0cEC-c&_d`8!yK*LQZyqx{-8l5niN@(u*T3$} z(CN0^B6>TboObx`w|~{dg*Ndabi#KRL!UGAM*%Oe#3YJZG}SdU{9s)In?>TLx!4}& z(lQ+&|MUP8s6xKStR{dR`T-IxRbf({5Jv=srkV1M^WZ7~NXC0F)tl|8$6maaK={2~4XOpi?q8P%sREYaByzrTZ}do>n{& zaQ8mC!v#%F{jW_qK_-vdl!yhfWyh7tHokbr$2PvL$yWkaxx&N37jlpyga(bi48YB9H$?Wl&ZwIuWN^c_rX5GZTk|{m~tAD%#1e ztT=sTqh}qJh`sT~=uox`RYi@j{(wTptM7F{v*S~@g+Ph%$j;9p?JRv1tIc4Uhqd*z zRcgXS-%H6q!u%*RkiHO8-2`Ghdsq_)FmaqErlP=xSfKe6JzpL_OTHfapCf?s%h#}e zRQ?~V0mq=8M&U-NV$+&5%IX-0!TQs~Nu@G!8lXAstv}lf(C7x|;|Y;dP;MO&(tb_I zW$H43-(P2?dS+hWEz=o)l8fI0b2t7g5NktR5;6PO7clY?{F9NL)_DQ2aVFe+2N{dnb_lNz48sx-vjfApbM*jEwkw z%p}Zl(hfJ>@L_?_+`n@|pG^vKNjW3p5kFx6@YTcVMnS&bV@a$z{C#njgLXJIiCAFR zzPo>-am6Kkg?yK;kq)-EZFwFUK-)$t)@=mDejXqda~lHFsO4%W6~o8@v6Wk-Vrzro 
zqhFtp&SSvM}(Os6}#v6vr4bAFjHu4CY4wv__m+G+P1zmg@(Lxh?NBT-v@L)|a%j--@LKiCK#bF}A>ObUFvq3_rdJ^RDG2B02ZRX_OL$ zhi=yXr}TMx|x(_us$7Xsf& z8EZJA`3t9vh|{Q#Jo%C81u)Q3;w)eDU%5fY20X|$=6`(fTp7CvxQvr_nSTG_L&1Z# zpIhLNXh}^5a)WD5464(rPtX55pF3AY@Anrwfjg3SDxt=Owt671K6fWA*RD_8j-3Gw z5Vk7nUZMTKC&42P9Y`tI_WCGsv*?MFIA{e0mk(C>Y<-$)YH9*^wQxd*CW;l5ZY#V! zvP4BR$tNhz%Y21Q{fUdaOZ)0u06iVFD}}My0MGXRYZ6 z7iHFmo{EsnkBxi`B*DxXE_3}Tb&b8Enm8l0##F8UVnM6cH zzL-A+NIWa2EMR&c-47EGrcl$$bLYD%R@1Vfrge#RyX0p zc%UzkZSp-2t%pJ(LEc=CyA0h1T=v!wd#P*Red*r-rSCDI^LSR>=F67V#S&s-^xi4I zYz@!t4*_?#ph)XA2kb5^j%4pHAL9fiNmW%<4jETlPtQ%eq5tr3aIAceh0sV2g?t0w3xtFMTU%X<98HXAq4V{BpZpyg zxYW~U*`?{F&+eO3y3YPcqJhp={S~958{V1=Z)f70#!y0vnBULmE84F}Tts%RSfx&p zJb&BT?-CnmX$b@FX7<%ouE-w}0?&w_lAxMFf%|%x|AY{5l!^a+Z^LLp9kvmFMto&WpBfSSQueX*S>2>F05 z69)(Zlt+)1fDGe0GwHLH$#)V@yq-M;;reLrt1snXLNpceJjyqkh^(qV&7$m^;O`H*0;@;jj1auXi)e{ zN<&3$+kQq)dwFPSbh2%@iqbc~J+(1)=n*lNA3y^|oJLr^ z`iGWS`k%~;-HQU}83sdlg0+vfJUB=Oa8pBzHAE>?L;hD-UzuKL=XTL(Z=vAL^xKXj zpj3$3P=v^@ni6$bua!u57~7TQnK@C(x#rwM&E2HuK7QTDULGVSjBmg~RUbxLJ|Qpk zDdo!8Z~({u%Gim*zK1Nvd6$#oG+2#}dQ(DIW&a(jCBZY()7k`)RXJSd;b+zUxP9() z<@)8sS6UFoc_f7eagB!KJ;TV^-Vc_aQpF#o?LPhl=jFBRwbC)k5{~EouapuyTDa_l z$^E4<^xiJ3_odFEQn$CtbUq^yWG`&cA^a5=OCIrvO%`;ip}g6#wTiDa4e8028}!mZh}tK^1YVa~ zYn8W$3mTBgxK1WO!+RJ=&EW z$#-wQ#~ClhAhoqgS$cs6Vmlh_IVVbnMYbAV9DK^{+bW~P0XIg79=4;GT%q(|8XtdD zWM7-e^fO$8EH$2Sebqp!+(K;uMNK~)3`1hf?qGHlA&w)l!QTx$J>U9xm1|za$#W$y zF`=zXl06j=~&CV$k`ZXwW zVWZ!t7AFjt;L^ey^urPOdBc)1MyWi&eS>KKMCw{C2XAa+H&gNu_M_LzS? z;0D20Zf1`!TWvntFw~T3oiAgWy6K&nPFSdB0$0>;o*T)n(ZOt=$)s)D-X+)NJnTZI z`^7S`Eor??#WU2}XKTVUy_Hh2uM(c??a}g$hVSus4Qe@BxeOn`J)ICHRdg!9KsC#E zb2RkV>Rbu^5+_fePp+{?U)F3$VmXGMex`RNVB+BL+yH?ug#F!Gnrkg5%2@CbtZj9+ zk+4w1>MS>2yt_3NkF&mmV>OxFVX*CI^Nzwcu4w|6PwNTPG8p|{=oH|2rDHpthn_~D zpR$y~oT*=9)bL~#tNO!@!cXR_Q9rsl(YS(l1YKf)!51g$mrQI7vR+`zw z!P_-vcJ97pV<^TY#!5Iu<%? z$3TYNX|tU)Y%}&a-Tf_B#iTnY+EOh*zvN=Kv)HHQ^whDHv3ft(Su%s7=|-BVDL?&! 
z$A=ILmmGPLbVOC6_Eh8f)EWrEv|O3OM>H$9d2&WpUt$NS55|$KL{qEye!Zbm5AI`x zAcwbZ$(IUbY5C%_!O@FrEryf*t>TWRElUka2rA3;_JRnvA-ap!r-Y3k!gcPfUI`$x z>!nEaQWttBtp#OF#o0G_P}bJ4;>xP9d0&GDN(nV(023X52Kn_lV5WL1G5LC!%EKs~Rj#ea&(-1{N^6Cd5CYn#yXnhTZnsofS8)O|N7VAckYxE zRvq7;F3fUnDKVbr7UA~uy3q){#PO~G?uoL12uJ>to@NR){KPu$e80EdqM47jfF$c% za{uf2X_Uk38AK#K`Giozt_05A_Tua_jxm&yO32t+x06ZGdq#D9Ou8OByHx%ijkc7{ z_5rI;v8hnSO!lziq>@#teUf(PQcqoY;0>4e_V9)B~F zY*F>p5MFNDeMQFD{bXxlmhb6E&+%5sHDnU%73FJ!83gFxz)z2lFV;S3{HKLK z-~;8t6MCxM^()4VoE!hR8FvJiEnR=3&H!#@8`laSS>+CGyuC!Ra4(dyA)-D;>wL?g zx2}!}r_Tx!l>HJJ6;@R#d+0e%Zpl8U@$So0hsx@g3mvE?pZO zp68ObS)Pa&W0IE0IYYMIYeP_P17&JaAI;D7ZZb@F(@~a5S!*(h?mWde&@OLuE9Oq{pZ4dFX})Q{Z`@}uR>>SP_PsB29|ZEB^`nC3l8aAR~Fe@D#6eY{AVBH51M*b@ixj|qPG z{^|=S`zm)&Bg|bIUUwDRhUHhQ-<>Q!&wG&pot#i>H%C5^Q<>-``#D{&Ic{d>`LcQN z*`7VV>CQ%ve}3qw6+5_ZRPK!noao(Iqvks%_PX}+-nOV^{pdNZbL)m~kW6h%O<|xk z2md%0(e;G-34MZ1NpBIxBPG{8uc;%K|70djz+3p9tRz3aKexm0;GTa4X2|!@>hZPF z1Bj0Fca`7VQ{@wn7<|`W*T&rA^V?-v`dS2Qmk%yq`C&-Ulu4N7r$;|q%@B9Cc||mk+T>!cV=XNsSY; zk6*TC2($~~OMJx#^WM%gTxwGxP;4N*ON2Ap?HxYU}Ge+e-AOswlz6 zDW|UQ^?cNP@|*u)Fk~!(=Z1r>j@CA698t{mXWo=g*m`O+uV|b8f}Lr~+r4Jv=9TS= z$JmIY?R@cXg`-#rhG9J+ni`!$$?%SKWsgKt-Q*crfxN1Fw;uUq;PRZy#G-Y#L0;81 z8-J}M?_K}mZE8O9>`MJkML|dPiyuDRPf}s(ohqQ?IPihio9lJ zac6sm%-F9t|EO5MZQGtkFTitCqio#_<+s%$GlDA_ifqq!x~Fb=;^#>>2}{DKu!n_t zOV;-x(>))92LYLXv3|gU0Qd&PJ*F zMyoju-DPXD(Hr>HbgR>Ha_b8z!H=1`jrg`4P)dkwET1c&qp-=#u5-nTg*)81vAvxi ze|lAA0CT(F#Qj4rZz#n)Vk-V1&N0f#y4?EiJX5ws`I>Qd%OT;21P`D5E&~TjjfX5* zW;Mf4-TU$_>LqwzZ#f_uHU)k==u`z_4#GJd0P~JM+ zbXqvasB~e6oO+^P_3MV_$C?x=TxD?_mYZ%?;Heq~D)vZeN518Zv*zbDLso%S8O_h| zCC9*5hm92Qxd+Wc`v=nHFLd7Sv4VAH7k-PG@osx>jY|^FY59?+*CLduJ3L$TBw;Sj zFRe$tg{1UswiIoelzJgjCFPsi!PtoZ;6WjhH+)y+JWDahR- zr^GeM2L!ry@^&dunE0l{+eYGFa{D=tK5E`1EPabcITLoboqfe{iEbKj!R0}pL$YgT z!K{sOA<@F?p$mDqIVij6bf zQl8Vw{W7j&-luYQ^Pg4NK0V{wZcx-EqY@I4!|Qsnaz}d?aVMcgt>&BYKX{7V-ouX6 z3%DfQ@hHa%#yj|s%1vu^E*pmvH7@Q7PD#pdyOfRN`wDy)E-g%qRcK5GP@wR2-30TJ 
zzqKUPV3!_?fj5TMUUry-dwky3H+;E;S(!(4Ys`FTD=XaF5ys{4i)kfg=nrGvuFA~z z+L(`xF{6zQVnmiyGIdX4f|yF@TZT5Uc}r=Tg^pXROE*!j*Q)_h!;JEQ*|(aWfe!gf zje7YYqcV2DnKH{a?fD!Zx=~PCExQ(97L+c%V<6_Tl)uW`ujaH?w($|qkauiV_X`vH zY!A0YT7%<{YY(H^5i4nBcw-qvmDHEFvEQZMm_W%hH#?+py(n^pTNm!4gBwUGL~P4qOe?^uid}tQ)XfL*mlO78+^Rvj7x-=@WxIVW4vAJC znrNzzakbYx=h{)_OK}r$1Mf|rlgB;WwrF0_`((8?<4C3}hO@?RAJ05Ewv(jOyfaE) zuK7ME_NI;h*juU-0+nKc=ej2_u++LAeyfp<7VH`|oanU-An zQOCb55vStU<LJ=F(45>R4D7 zQo;ca_B^5Ppu!+a^_G))p%)Y}y!zE92Mp#P#bW(-r!(T|!W6DT85zerGh0(t9$;j7 z;d-p!vcwk~BM+_OhUnpS&FRS6q&S^JUSsgRgC24idf^%QUFId2CL#iqd*qBtsknDH zQe}fXrL1`HwZ4=#KO^^+xy!?&pe*%-co~srcFgA{<-X_^HOCRS+}aP@Q(h)p_oIGj z;V&Xx@ zGgqo%Tr1}JXd$?6>F06rZhKG7VR7%`U0&IKmHx_qc= zacwB~(A!Joc{jq(sC~Q?!Uc}qH^?AVatUD__ZZ~CTw^WvI zZXOYxkn&nlexMv`-Z<0#IH=T<>(eW$g<_}C+nSnyE?EwknczkYG^pGKe1Tq(|1+I+ zvEr>PjOvE-N}4RS(I-yn`l`fWfROULHnU!1_e=8^`clFmf@}%% z5mB?8qWL_VB|u^YB+}hGbzj4Tj7q+^`5bBjH2UsRH;3-+^Nglwp7n)q=cC1yUUi2M zY;q{)%0w-*v72*Ug1BT|4>Ms8Ja!nxW)I&!GTC~`SjDN^XcLiN7K^3OF0%heM+mp& z|D%08LyG%RP*;vg%PjpB18nKdwg+D>4|=Y4*aY%{UZ6wwAZ~eYU3{sW7|Oihn*5crL4H`YR{5qwFW%0 zNcc!8zB#T@m9M5^eI^Ijf9Bdi#c;UBSoG*Qo~0uDNNP+?74_k|V95lT6W~77Iqb&b z&m~k#wnwS!(ldmdXPs!rmqYp`ZWz?vks!W_*bfC$?c;0Qvdb^Zo;t1BZ?qSv!Uu*V zS0>_WrN+fr)V5po@>kT430A}|CzYGh48K_igpnHSgqe~n`hP0r>54&&^QbvKnZLYX zIxbns(?!Kxxu^IZyRPC*^=;N94NJur@cB!c{6(D^9Y8zrsraH*0Es$fZR%Vdr-;o+>c#A{4PL=B8jd~ArLVJy{&|tlhTm3y* zZl6;oCS`%{DcCe(VMRBP!0Q{2HDq$eHHFpsRX0BOuX*-v-K~uWRWIrBxeR?t_UOFz zu!2X1Y952sqqP2G95UL2FK=&4E|*9QW#+VCeavM2vj6gF;#HdfQAlF7s6;-hu1rjJECkbHW)eX}3La8D0b^QNGtd zbU8)iF_nLj{(#G z^_Iq0&8Fps;+}ix|2(?0I-4}zo%(9Q2sx$drCnLTxdg!e6VRLaQzSn-|ss7zxGHX*TeVQ z7ORse{vARi8yd{F3}wrG@!~}}gmnK{VeG1b%^7^b1bR5F!S{kL-Ek>ztqmPJINa29 zzOWIz2z+?` zdIRXqh~Lf3%mn?MiI09<0}XDr10@8|%@D6A<2F~e_?@FMl3xe(vl-Z<%mKZ$B@>La zL&!le&*TFB4h8^aJ9la#;IM!mkW>)wv_9RL>PT+~gqe6f#P(t>r(6H6^xGdq%o_}y zo5cXn&&9&7zc>%vt^pz`BWAigXDA7=JJl2`E(M z&5yznfr>TW`y81x$_3CYC%8m-7b?#f(+h}kni_Ii5wv0%pxwn}ClJd~gYHktBuLLIDSow;_Jrg+<8lA8RwvhqyLBlw(1d 
zYEK2U5);s@wY{;dv|#A?mGToP+PzbYy@9$8^2L8PMF9C}RE`D{lZg{;tr=-APeAkXJ&MnbZ%cg8`cAQXc>x zRUOLrW#oe4{jf8D5R((z($th`Twa1TYc9X~*qJ&PyTiOyfstE__bj%=aOG+8i)h^l z-rF{-2e(qUnl}M+@A=WQOQ8Rh>a*LkbK05i>kD_qX8fqR9<;voi&ck+-87ZNmRRy0 ze#KMGfYsEI379p(dP6>IMklDPkniRR!f>L2 z_L=Ex#EV7Oeg+GJ;%I4gHHSmlEpU~ax`%=99nDb3D+EsgQZwMe)yEJzbxeX-0h2De zovB+p$5_xe0oZV$Ro*DiN4NRG*(PS0+Fwi|a_YlG5_5uYUN=wF_69DgW<`nBXbcY> z0Jrcg1)Uyj^VQjLSVdJeHKRgX;J#`{!x1|(CT3L!OuIBhP9T?6@#vt+C6qHxS`M@) zv2ImoD={X?AaPAGyj=E!Wj!%oV6A|qr*wmsUF1FJ)K#y%`QbWq-aP*qp~~&0KKs(< zDOP{ovOw&;7QKu1<`x!(pxL+TknkOPqE5pu&QZ3=tr_M$;W^_;n0BdQ){)$yK79NL z|0N%hbFNQ-k6p9u0^Q8jnE*Vc<(#Y+f7X~cL@@N-1^dEUc?U@t_{W+0J@K}^ztVFA1$8i@X3h}fFfwd`-1Q;aPQ{NB7WBwZ-j2Y z4}A0H!ab@Pz7YmASK!Wo%zT-qnx%zBSK6&7;kk*3T@QjI@TcBux@K-F&9U-6C9aHJ zVZ16Gnbh3g=La>a7-ufMcL3D%0iC;)Aqk4hU#Yp%0{FEvuh#o8zH~mg!=F>e^XYv_ zc?Grx@b>ESx@jKG_KGd4r=V7TsCunTe%-v~ySKA;S_l{K+|n#DBydI-UXR>I2A+GM z*_oeA*|sV;L3SMuEOMowe@lZ4K>Z8={D9 z(VlB_J^;VEZO9jxVYJmR0S%W`*w}lrHF-MDU-<=Fp2JWD@u*|hr|rWKPSXMCwiiM7 zeUyPc!=d-7sYC9gFX@h5ym^VAtQdqaiGIg#V4vZD7kf_Ylk7_Cl}!vZ%u2=<$N`>( zW19zZ?>;Hjl%d!G_7Pm&+|Y2FUTG!joR;n`BDR%- zV7k4&2=XvMFlCkl6lKt@T@%7719&_SLH{>^CZmV-r%ez$1z30@2-@(y4Gc(?(0Spx zfa_lZ^aq+f?*K+T8VUQWx2dTq!0!inz;%6neUQw7fOke3{H+5JETe<(y9izB{cNrZ z<_DU01XyHdzqD5ieeOM}O`SnackI#6Ain40N_!v$dbmdc?Vd#X-B0$>r$q~qv0Y`M wIDaz@n9}DHNWaVe|Bqzof0*K*v}u={o4JCq@Z)4X1pZx9Qdi8rZ2I_r0n+oVuK)l5 literal 0 HcmV?d00001 diff --git a/assignment-2/submission/18307130090/img/SGDM.png b/assignment-2/submission/18307130090/img/SGDM.png new file mode 100644 index 0000000000000000000000000000000000000000..ba7ad91c5569f2605e7944afe3803863b8072b46 GIT binary patch literal 18198 zcmeIa`9G9z^gn(NvP6WCY=x3+sqAY}O4+mTyX^bEOqOg#k|bLw`NUS6N~`~4q$f9a8HuKV2Qy3V=IIoCPQb4RKwE1Wof<~RgFC+^&qSBD@1 zI0V5^qy%83uKaE}1l^jrBY*wAx6$$hdBeRe+~H3?`^1mu6K{}GtJNu@gVghG#{^EC z3X_XepAU9FWvI&S?m8bsb0_DGYu>3#-)bD}wUkX7!-N$zN;GYrlvM|!1V1Rh`v057`jcLnO_hl?eazo zik74JI(FFg?sx2#Sp~#oHG|qbQwDPlk>V17ue`XF0utN7zK1& z@{TH!%mj+)Ei>CXn0~Za+$ZP>fJ&7>oi&f 
zbh2`3X{pj~$Z@#TbgC(Ugo^ck?C9&r;VS&oV4iFDlizQYSF#8Zy&~a^g4YqSo*dJ) zcPQ(gNd4!fuUHU@oCprnYEPPeH=qfb^ujc#xKe$I-^{6zGm&Lqvl}e5MyA=8sFhnI z>DxV5CmR={UWQJL{zF4=`1a*6NLdxEPM*)6^W_bQ>mq@uI9q^4rIXVW#w-lsZEjPJ zmQ!5t?WK{v47u=kmR&C{s%3(I7l%r!N$e-;Dm@klGn*|wJv(vc+Dl=FS5h?rSPb2z z`xlKGyi>(JK72PEdL`^Inxn#oo?Dryk3PkH#cRc&Y=5PG)yVr>*jvSfe1lrode3Fg zz3GoBUpCJVyz32 zvwK?LsN&u7IxlT3?9Hk3G%+mYxc$BY`jsoHwW6@`Y^CJAt;Hbhay26(BcEQy$+A}o z3Fr4pnsIw}#kE%LQFO_|4tq=GNFb8lEQOg5PZ+=~u)dO}y@fs%8BNwfLD%me7X}I> z60^<-nuWS;ELTsZNe9$<5ASZEqf?qz>K2y-cPUWYlYXn4eJY1PvSNjuCV6pu#!dS* zE;A`{lmw!Tr~F%u{MCq^J7R2Q;LkJVgCp*!F-qD_w{Vmb>M*ZH?SV%m>oRRhx-{DjWp`BCVL8L@5;kq}`aa=vnLC-ljg^XXI~Q-yd@x|~L6GmT1YU!EBR}~X3X}+_#Q)WAmuc?J2*g?u1o-%J+bzXM?`&w{6@7dL_*CDQu zV3px9r!KVI;vjzUG~oi*6-Z4?v6yB29zPM8)TyxP)i&~JUpW9(T{%T|Nqnr@Nq{nDs8DyKoxwPQpx%Du3Xc~QJ@swe z4e7yZ`|;^HPv~%e18MfzZ?&HA!kxQWDthrti=0J`dN7Ae`Ok zktk8kxG`4cQ0~_TbNBW21&3P1VU#nbd2z6aNf_S#h-RCntkGrWLmY>~3m)wrGL*=a zzsAz}eeBesQMKbl6dlip$Bb9qe|$k8-N@=I$&HXtIN(`VEu$z<&emkm?Q{4eKWl#5 zvhz80-eBfUabZNtW zWS2nm_=K3tblWQ-yC?<6(TY4#X-XzUYe7kq?*~Tf2=gB?EE1Gwu6;ZZd_zWi;hN*P z4pA+s)lU>#(0*BYC$*;?Lqi&q0;FI)dW{jmV3hmY3c*;9@M`yh0`*ix6?*fcG!BXd z2JKi0`U5qhcNe42i11j}X5`&l@gA>#1w+sK6lPGKJxX=< zZRnFw`Ry^M=2{tQ3$R1DoR|&6;UdE|Uq@ybbL!i>mXLN{=Ko}&+%|y%b)2q05aK>> z|Jx4#V~~XHpTd}b5-vi|(pkvx#$6jhXeL36AS*%9Ryib7pf=qr*<=;Pd^d9=h_Hl} zYp;fT%VGYr#i{_zf)}Dpp_(|K-$9n-PJOE;u@* zFT#jWktsSdwA_TXfVNkUE^+xG2^d<;IXWvN91JZVrM29vU|!l zmx<+{9bnc{-k%}5SeT^u8Y-HX#=Q_3fu?-O%|wyaf(p zRlrj*)rrD&23Tws;E`PQ`{hs9}-t1fG$Y@;S_Zd*T=CM5Wd5%FEjYSEg_n5s#ZOfG_0~ z(cW4y+x}eOZfXZfC{ABK%U4aHJnV-21Ocyz*UU!{Mr(Uz!mdB}9HssMo!B}fVl);W zP7wziFU!J<{{(C)gH_7+eu0+5#$5M8)Xe%WO$AsgllXn&H5RYG3J8|UCgFMSjT7)} zgC)i##cmIPr3HpHHCZWHcni&0NX=mZO}R$@(J3uS;1!K?C>mMk9-Q$U4FGc)bT?Bbrh%#B#4iGzYq*z2DGohnS|7j3<>*HQK0s=1_B|t z6JCD68g!@f5bi9M9~}z77Ez$;JpMDZol1c!?>R~fbFh5{zSw`13Cw${D{#9Zt)p)Y zYN9}0MgH9gz*m*YLkiR@p`+ya3@#GZ+&Xd8Bt_GIf{Kj{kC>#<>Gmt|0!@u0)T=Ba zDNy;E{~={~iLgeI?kFin#w&0Yng5VlpS=R-(LTy(m01`CN-+VL`oCLKWWY^WV|Mc> 
zsRAv)mFo4QTrCV-f%mD~b-`r)@sd_7It^rYc;Ivv9(15-cnn$#|Hn{C`4PYxe9G~r znCiFPrT2uK#)0R9_A?67nB<|3%zKQCh{YkiK@Iwmk0z!@lwH(TCSDt@#Gf0sx`o>7 zM9#d*QkukT3j;@oE@?f7r6l1k%U@EimW&7gYy~ zR>s35{cDcC5wmOwQ8E2zK`|1*2eRZJd1Iy=2_r(F=|B2ruE2e!v?E<6V5kn&%TOUWspMhMyVi!N966@%-=YV)BHOWe%bxA)C# z)#e?Pe6+V9LbxtbDVm)KoEf zRq1p)$d+8bgI9~nEAm63F5HUH<-33Xl-fKAvMNXBmo&ea_rr|*-A`cP=02Z;!w{T? zTjOU5MCqpjc?fG94e@4}>d#P+0AY%W= z8T$Gbc$NEjH-`J7i9Ep$H;;=uN4Qhbhj#_PcAXTZQ>b%}utxp-QR+!az=hmbJ@Q+FA?)8`_rxyN|)IVRzc5Wk_`QLg5?ufXrV!28HQER=Z^P@pRMN4=qw3V6Y* z^T+|YVU{04lT}K1r}FS0Yt0r!LA18>bKw#Mr@~JJye^G1l7d8qhBgfHfBoC9m<<)n zLMks`7ZX4`vOp9cW1&f)a-Wl~Qn5;2d=bkckzI_5D<;EkX~+E8c_+dh5dS%VATr}d zn6mi%8xR{sruC#s$gF?lSP*RnqL%XWraBGX`wF&gT^KF_E}I~d1L5BDOPZaOCN3Ap zUtcHmNHGSSHw8R|0?{lUbP$3B@5LsGoo71}u3ov)7EFtno&rZemj%sWNLv*o^G7>; zm=NqiKdo8HXz*{*__{Y<-7N@h}`wHlYdOKv!US}d+#iR4NvbtUiyk>AM3 zNcE)eBFHEe#%x3vlS#N=Jc%&x$S;5(X}pL5uiC{1x_RnvU-0Q?=j2SecJXb2RF^^R z`iCb>11^ERLZ-U9fMKk|5r#|8Beh3GoKdnaD4v){*saBFaBIus@PA|18pC>mXieWK z@9zsF;WJU-y`H=$5eL|Uzr(_C^xSo@&L^~1B+g;S@hVSpX0etV>_*18vbge}pM2}^ zC-&h7U&p9wSOXGol6Kd-1d^{kxMDZR3wHFH@=F%P%_zLxX0s_oXL~atGUh%fxwRZ) z$ZZVl6*cvQ82EqPe_ofRB~N=nCA#Go$cjTPS*T3hO0o%M75j9MNA^BxT0!#q@@;b|k@k3KcRMoIYc2W%8Axy=&OX^1 zKczzO#!B8g%_rNJq}e9rGRSdKd7^_1`=H5#*M}WzalscM~pEKIqFn zz;0JR=(F1V(F=6^2tz-GW#v~K$7|GhBh05hf4*1tG%gNgDNk6wbh@T|8YnICgI|I; zo4})O)aYBOlWW|J>(A3DN`aKK1UO`9}<4JC4?Q6o~f-oZ(uU^s|Iy zww@nR8vh|fukJJ{{beDAnb55t=Lkj76jvem)U`o34RBsSuJ#nTTk2KXHu`RLe*CDa zja8lrod{~CKyjtvO`7zV-}(_I<%em&?JWYABr*Rc{>(L-%QjzLgOpZo$UcxPtG_C- zb93NdUeicQ%{yXPbi3Zp{uK_k$tarcP<(4 zxZWz#c~kO++4ELN)vX)71#@Ijcnk*f!7oFTzV<~5x?|b z7w>tH!~UzR(IJK|AF)A)+d*uBJKw&&4<8utNtW<(1ot}?nFIH-fcA{?y1kl_a_jUU z+%C9%fh0YvglA!$cqrg>0Ayu$7$B6)Z&~dCw>}I4kkMqSz8sa+p^~W%wjkzna1p0T zgLf966j*&Mz%`xY*5iu?HEY0}4qUH&kaF2&iHAUknFIuTzmEsRU=Rwe^yMJM&?z(H z4a-$ydKEU`^w=JO7mjHYm)1II5y78l;|~l1w!htc*a-6ZJ>7!lA5vU9xsP6?fzj;EuE19t|L*?zhlDjM>C)gBxjRgZCv6;IzGlLR zFs%D)>DnGZe)2P+jcdUas34riA4Lg+VtS(mIw8j)}bP174wj8x_wz 
z8U$n#3aqQ5?Xie|P0{oI<0qfcdbZQyElJj2>y>>kZhq|b!#_+PhcRa%rDsS_Lyvp^ zl4J`!O=`kIMGQs60J8WgWV8gZ{pLfwOEALQ+H-L8%@z{izMBm3?)%k0(+>B!P9nf* zIy$Xa0Zgm7b97p)3?b|% z?=U)@gh2GF#*tTWZ~<3(A4D1rc0N? z3GKhBPGh15a!>e`DKmK1@*8nYjC+fd13&V4Rb@-Q? zFJL|9AO7VAKY>Z&#eXSCtp0a`-?pI;+-q6A1A-b}|K3%{eQzOnyPzNlo&MqX!aY?& z?{QHN3J5{|XGob74nt7CfoG+-fyz9<6;u?G@* zfBzTq6;Z{JZ3y3IPeSbI2qjsJqu ztLJHH(pH~eY8GgDfLoaT$h-O*($gFSJI^(gM|{RXv8<4S^ld=vjlM*%R5qIuG%mWY z=A=TSl<#I~A}C>l3felTXMs8&xF{rDQM9%g<>I{-z(E4C9s1D>1*s6+<^lex(+rQ8 z#a!Pwe`^Jf)5RCKz~O8qrlFH}h)Pdu0XGinbCH#xkkkoR9px%?e*l)Z__h6v_EnU~9KJ_!iEVk~Z1s7j13^$WVBLYcGhCgh8Qdut+~0cpeGQ zCDjk4zp}WZg0icjVJA=x>RS8PeToPm`7j70FHJbi$HP+qs=6wYKSS$U@i%Id^tZF% zT)tJzt7SR|H})4qYC`Z_;Hy5O?|zBWcdVVoj5%rhNl6n(?^+0XaS%C&{+?h&-BAxJ zP|b}gsi}sQwgX?{OUGkarSw2KIGim|$1pA#L&l@4-aSFe&16akoxSv12q%CL?icEm zfm&FRQR7~k-wL;WRp9}57Ct|_JT}C-0SdN3Kie5V4OkD{`$aK)Mh!dpngy;>iloc5 zS`9U?>Z#R*TQ5Pf_bZ0n4%?m1JU%U$>AST6VuNWL@0Q?Wp4Rx7!(+ihKxXrc90m@q z>eaRbw)0=#_5^HX2DC=eX}D?M8Cf1HQc5Q10`B8SW@8#wpz^CMHk6Jpf3CQ%%CQn$ z5_k_c$o zWdyVyXV7vJktsr1bw7*MD0l(95%�sqOTbYqyz4xt89<_d9p1@YK|=4q{ZHrxExk z-)fN!Nt=cp&-?Z&9mGp}qP+{FLTljHSO5MBN+OT!0!OoA^J8iG2u{mJ^%)@#67|j- zU_&F#NAw-lHiTrBV`cioU75u#482l?K;CH;M9;TfeyyM&)K3stsS-d#ucqV)srSJa z;w9-yExOGdIveSuh75S2cYZvY%x^_Z$Y7&yv`Uk$L(tvEYn$LKaKV1f z1W!^PlIV*1_MY%S$nFu_S@*>k=u3A4g~CP&p*tU}jfuuN9>w6dL(;?!g;|;lJX>#q>_(TR;%;&4N&L6&Ht7!X; zFQ+3z!6x?o3P7rGir{yzHDT@hYHTfNo#0$Sqj|$%gti38-C25;I@6X7B8n?%Zk0gdyQMl zHbX}6`9%65lqNZ%S1P+q1Q$iCNo{N3pO$=8`sK}LZWyjj)iWsRifxci*NfR*QRpO6 zG9MkNSoC$VH&o0;r|8w0Bwzg>DipJYdUf{=XLafa>kd45uJ*mU#*OTN!M*431Z@+H zVlRB|DRt{1V^#I-zo7h_A;W)n)bU2_;d7sI@Vntl7`pG%R?m_8d;Twotkwua`bd{UB#|(NIAR@FKB*To^Rf< zR>10iS=4HO2SEUF(#I~;rmGih?p-#`V4U|DtLfd_MPF`4q|gl62|7hR;2Q^}%** z-e+qt%hiPB2|V?oh__<-*E;vOL*>*nHS?4JGC;wv zAnb0_^auu*@y|sY-7N}Ucpu9B4Xy9@ow`!tfsh?BJ%8QB;oZH(mf0oyEhiVh`L~Et z0vd(y;E<$3C;9NNIBuXy8%0G+|H}X8xRD=U4fCGx+WzjkxpGI$r&$JL3X1XoT&?6c z9%4)guMu;N&)Vy){f)tV&DswaDcLCZDIwHaMw;IfXpr~X_vu|e9`MQ(4 
zX3xnHh=ipFQSvKvHO+iZdYBx5O+#gPF27S2|71I$2ml__c*eWx*UxjXUHJT|T;4vM zGVD;lP4EY~0MU!E1RlaJLFSjEUMbR2&V3_V0v9g?rpPe|*ONf-?r`6g%Kl6C z()V;y#aqT*^}e+Z${gl-elP`SB7>mlzUH82|BPWHfFxS%*C6bQn-Zo3oPbE!@e<5l zfQr@M{~9<$RKLY-x?$S-?p>P-Szy=G3HcoC@y{+}OOR)bT`DdBTX;RBza{fvD{)Z( z$%c(d51_AM6oxz1FYnk)z<6;dp-(M<86l-GzUw<;rpk(jL5&T&z%KZVWb zU0=WKwY_3;n5(vMyI#oVtnsVKGP#2>j?z_8M+4Mcf;O@5xZMOy(q5++cFc5@uyH#L zfFG!Lb&*n;S*7PGWw<`y;l{$(0pjSUc1rb)u>LgCBF|-%v6OYp)}Ve%vari*$)G?K zuN~S*mMF#7V3UxfwfTpXWz;@3m%rR- zU`o6SB?i<#m#8~_LNqbyTjLx=rP?W?Wm}63HA&B5OU&}4A~HLu9W7^%>{bEviKx4*c^{HgX*UQuVINxA6g=Dzyaj*1zlE1@<$ei#k7^rnK@mc>aDH2w0 zAM~Aj(?!01As_Y5jqIK^)d$RamsVf)!}$Cq95n|)L91SqCSiAP}fQ>(mPoXv5I#w{I>nGuzCB#%;kZA<%n?uAY0=ERJa zc+b);4rmYbsD@K@2{cA5+!xZ1rvDZs)sgI^M|&YRRZlc$;<_^~i@kUU$p4w%Va|R^ z>6dB8Dfbw7YL_mjOIJ@-;J%=DpBP1@zHgnlbcNW^Yfq7F@}ZtbEMiw`^5hZ(*iHi= zP-)-6S>c~!;zPSp%!p>(nbULr=q%A2G_`bl{97}zU8d6v5nA6N&p_mtbRB0i^IJQ; zc^`IrXxivx7HbvM7Z0DO(^@rK>}~e7uwuphR~y%*dJ2l4eY4p9K9GN3%}>2_3m_*h zc8`=IrJ2V2Gg2VW=FG*wlRW4m<86*~53SdSQ-@W>jatPXvjmK$4eCwhOdNjPY#>)=9`C?NaJ{XbG_z~t_?H_H4ORNM z6-Nt=BEPLn45T%@+Nj96X|vM3(<9O1P~c3kv)M-|#g0soz->1VicEc^Eg6qpA}S5g zuz2=TsaJly=cVbvYS2R*xxn*7+zi{pWV7ooE42?bxt=FjN^PpHiKiCn9b5GtGIafx zog5q@;z&`b$$$3D)iQ^f2P zJ_mdz9)A+@wt_mBLPwCMUXZl~k$6c>Yu!c<;$7c3+4Zi1fL+VtWFNz{Q8H9Tin%4a z^A+_gQWT$l@WynCnU%=)-9<&a{*U;W&#f6JJ{)wzVU*}rR639A^xk5K7e&BR5UfhA zyq^ws4%Fm9GgtfLda6K;I;XP!U`rcy{diOK*{Tl@2_Gud&zAQ#AfWmQmvCP3a_g>*$KwYTAB)Ux z?#85I7nwa*o~M6zDK<_$ybzOcH$hz@)hbB!(v5Pi(FqR9H+mIM-BKC0Q86q>PL!MS-iTa(oX?tqyvH$BY&-l*tcHX@SBEpftIP*|D0jyai{al4mJNbg;&S=vE`$RZ~jTaPgLFJKEB^vZBA4` zt~Su#E^l$YGX?pk!4kyUEIsV95YMLR{657&-W!*^sDFcuf%|i zV&Jxt-gkiIu#HXUTe6Vae7et>(6~SDww|2LoDm|Y|HI_G*mHnrI`3tsX>n|A$ z&SFy`$^#cBkxp|nmqPAQV4A(y6&{y^HH%#*J^A#m?UY7= z*tFwb*RCco^9K*h6j;d`9@=h8pdU_o>KC^N|6~$Z_@H*IbfeK!csnt8xSItLfF;Im ze5&p7xmae|8d$tH;kD7w)sy;AvnQu_`pLnf7*l8Gfsve!T55)sYRb^Y{zpHWXKMQ@ zIyOQ{SGk5CY)f=;?MF}+`FT}#An7py(nafaZE1gU2FqQ#+DXwKBib z?54B7jhk|r^6cQ-=uz{AO1m6PgcX}xC`MsyTw%Ybx73DRl!Zkif19RAy9AZoxnfnG 
z*q&iMj1^Z3eY#;=R9{QDQ@?5vOPkpf63d#pXf^ti?O@DlAxp^qw9YIDhdR<#zqNkr zkYEs|?ifFg7Q&=S%{EV?Cco`da_#3oz`k;fmCncbb^6q|H0sZ!XweM9VR3^W5`J+{ znyj0S=e{}c`{4lwezk{`79=)_5)-t=&&7C^?*pwj6(i+%>NR!`mpJ3y z1oUm7XpkOW^SE!>Ft)cf_%^XKcB3}^=gsIxY;5P?cN2TILfXv5Q&fPG(=Ji)w5~pw zO%bm)4H?yS?osF(R{$6h>Uj~D=B|_I>E^U_Rd#!;fa#!C`NHg3gRyf2*m<=^-?jKS zPo`@ZYqTmrs-Wm?yPMzI^`y9rgF3w@XIc)Ktkd#}E0g2zt$22Fx5RQJ@)ZO4eOq^yQ|Dx@wa8uu%76>zr&%1zX@RI*3e_Z(B5m_t&fDDjQCzKk;pewjCS15c0_@#NE!S74X@3{n{pdsAz#EaJ zNn4SJWMegIY3G@C-_T*_O^r`ISe-=Gt9FV^hNmql-kqIG;eW18&V=yQulI~2(0Nb= z@+s@K(~;)Q@5N2vlGbCpgS7QR4rja()9r*(T0MQ;(vu#sg$BB~MO@A6^$`9h(V9*& z6D$lMxT~5^+^fA+eYMa*-{(w2g4^bj|Lohw&H#Hu=amhS)S`2qac(>k4j$(;uemFoxc&nsL? zRTHXj`(h(lLC;4p)PJg5EScN=NB+{3E9Cs23WlU1B^$Z?8wi7G&U^g&OOlCoP<=KChw7NZ*+!2+H+xq+azDt3T_? z2#1UajULC+g_x@4u*{pYtc@E>u9KRXy=HdoGL8TgvRP3(^}&;%##GMZWj+4d36hz~ zP6SL31rym401_f14 z;ogNW41`E^ZuB{Jy*Nf6c%T{MqA08NLR|f>oAPc(ucSjpc%-R5Xfud)TP~`2TJqz| zq&?wdw)Ev&~}0XtY1M)RVafZav*GXaIs-j}*4oL1l}MQ?O_t zE_^-MK+cvMF~`=Glv;_PIcs=|Ndu30==F$ri?it{cKOr~NqgL{t+(kJ(hIL5NZU#! 
z+Dd^QU2-qc`d;DP6!tEB!b7{1Ye_B4+g11hJA;v*lF)Lt#8c8dv6{Qyd$)< zq4uESzN5BL9_EKb&$iwphZT`pmEp{TmH0f3khdJfK(EP6a&Hid3SdejY27^Prp7xL zU8)?&rOTpE#W8T{ReaQ|Rt`K-9sMc$n6<< zymo%XbnJ7It!0EFP5*}TfS~kaX0dmm!Ux(x>OltseUx0O^97YOgAyYt!E8_!1a(Ob z?Qo!w8xId6!}^ppuyV`8=Zq^U4=Rj3tNhV5MpCpYkBs_aJ$>pi*6=em=%Yn;EGlY3MMQpErohembRhPo#kXCvP^j1f;!QjIlX`S)j^3kjp6iEJu3ZB|t7cbo-B zGljTRbYb}%Wt*#owY7Gh`fM#atxHP&RNMcHwdNqZKvC6usntC@m><5(yV&uWlC6 z<`zjxdRVZlU;}zyyaJkV;@;e=@54jvf`yl6vmly_2v zL3C`lRV#3g6GxRj>%SOs<4u#OJF6hO_XE}p(9C=0yB#d@Q-6fqROXxF-6kzryiIR7 zAqgM|GEv=oAprz|F@MS)c>q~NYluF4d`2k$S?=+3lmL*2bpeHs+nZ!^4b*W}wwiBN zFEbEY^*m=dL1}le=O2(*zeC+=+GxJ*$apyA&mrTy+d{IQAF94(OXIuhl1XhGOQYwf@)>0AGam$PCG0DQ zZBLT>YbjAMUA?d)+ULy$&cj!SubuqCKW-v81cZ z{t27F;o3t&Lc8{u81H+2yI=1Pq}z!6gg+3Q(xIzc2vxFAiQ^#bn%1slcy%q#SbO0y z^&yYq3yR4?L;Zl0Yyjr6p7m%&Mb>`&$E&56pc?dr2|-9qal4BzgNq^}=vYJBYyBVf z6qML81xXsy*v_~l#w}e+%qp*^LK}ZOnOI_1)rf?x8S)Nqgu+RxS$k%*25x%W=P+O^ zrs|6ZOZ()r;ek?D&zz-j8d~dBvJonNaP^AyzBZe1uB>@>vu68iTVq>VrZ-Vp4m%(j zliEl-cY)R61BH~E0_1mN_3jru-gEsNypcV|YgGn!EE%B4KvD06AC1! 
z5TJ=v?B9a|YvWogxpGkO8PK+Klfm$Q{{1}AXE*8m{)F97v4<8w^!@-yB_7Zd`U&** z^(l!Q0KisHJ^;mla%R4KB+WEHhHdRFmNchmf?i#a9Ng&Z>jTe9c`oVX$hbUVm8t`n zvbSyqzVmMZ-l!ToA{~WJ^HY{=_r)wdn6kifN)d=22kwpLu&CoPV>zH&Y*-J9ww^=9 zh6}GPI4@Z>2Odb{cFfPi0lLX2t^~9gMt@&Vr?|+5 zZcuW2+$|Nd`n~+=+-ikbR*b8srzdFOX=AlpId=zOEvDDBnXyef)6tl2AjZfLy|_Fx zUCG_{k2j;~M+!iRUM*KO19Ttuc8Z<8q+L7?8Yh=DW2G=UKqO1EoTYx-jz7bZsS-n- zBWKwkNbj%7`3(T*Qd>Ug0M-|ixW<+0{>)8wW1*lV4V1<)!t=lJT(sVzU3CD_C6AC0 zi4WPKS{u}NDhn6~$`drGYYp~68(`52a5EE<{2(&6<&-9Xm1r7 zGH^pmZHpdco{T1;R`}eKLB=B?fd$HcI5vf|no7BB8hrcAHauFONr(A$L>SOxBLLG^ zJ_KmUwp*oBjCA4GLrADRwE*l`sZiW|?R8)LYtRno0*-FNB8l zqfS$zu+Nw~$B3K*sS%)CQSEo9*f)O(?qmJSphcevgyaq0E}#{k=YHo!XL-L?jCDTPYBj{jj=w?EN zK#2gQ%Nb#N?|s2SfU`J1b>|vrqqYRF(P8N-&^=-PwhNddcYxi9I##7}=`1|!@`JCO zae@H#N(2hg2=K!6F%;IFaj)$v-lYWop( zTzZ6MnFt2poAvC|ol_ED^Hr%F2P?R0)<*jC`E3;y28H+J^t`ItTzNp>b7n8Zm=vaj zGy~hJPLw|^@L;lGx6WfRrG6hEO4!{2fcHG6Tf%FF8FPafxNbV?l})aTmR+Ph15``o z+z}jsYP3!bKX1i#_cT^HPWVoE3?+ci9N;aK^j@y(xiA3!jy^O8rdlzEoZB577;veX z7NAi(hf5!xw<`;1bs+%P0Ft-VS}z5fY|MlfTAbx3;_f%0NoC6S7z1)1RvbpqWmf-2_;YWdSpqhv1_K zn!r8q8`ed@3bacc!CAoDzSndx1K`QV9P9b6UftLLa1Nca>1N!Pz?L<%Mvf;840pu) zanRmVCy|i&x+9iNfMy$XBV&}P8_z2~2PRiwDQ>!E7x9n~lMi}fG{3uedRDm^LxEtG zp)SBBfCB^^>fQ@|M;`!u#w?APhY>l0#U5wi_kVX6^cUQUWp#*cSnuRXjRP(787`Cv z5u(daZ?|PE14Pgi_-FwTg(f)sz>6qCUaOM_&+hV%JOw8h%S&)N;wqT?w5=B)M>NAJ zSwO?j5CAYq987<#0biW}`dn{k4?m#UYzZM*PA+N0oDsNA2wLl8TXjlJT3%ldp$9#* zz(<3?D<>xh0A39@@0S<_fRos~zkXLsYY-e60?&b^>r|7e4c5zlfNIV#iZixap0p-|G3H*C!) 
zf%)dOIa0DUA~?Hv{h+>NwhQ@s7J}f2nfVJ4#JqXZ7=pN&;V`HO4iP~JJUluGBK`j# z{=JYVVPji2L%j!2-khpDeZy?Aq~4d(=9?Ffrv^TnWB<)<|a_%`0MR^VyZ zn!cjSXn2DMJv{4q>(al=1O-e`M!tj)bK1k(;v8W6w;ZJCn=mq{O-$!O53wqq+& zFC_^77V*+>iN&`9g9g^9W=5Jlorz5y72JZ$!HJ^o1v(|>0;>6F#Bmh|90pBlidO z><7H_<8Ii-T`q6fUkj~ImHy8=HW%T7X8~FDEnx(DCnw47LVN4zT&33V?kp8ANqE8; z!80~0uv*K$45dU7jRO501U`k$K+eF2N1fB~>(+y%mVGiCF?>e#eh0g+*%e;}h`KFm zmst%s&2(JwUMx-vDYX6^cYR^JrlPvaaq836M;!{u5(eB2D~+U?iEg^jNT_qP3jH<* zw2oGqD4fEC;Cq`3?rZHFxt~u^FzVO1*}AU3wMe@8{V5d*2?=^IU({xZe*i=6Y|mlT zlg|o2sKQ8a@b%0i4KOEOs+GzOm9!35U)SIneDsbb3$hz6x2^NqlsMd;@?8H>W;3kX zj4m*!kv?jnHS+mLNb7U3`Hf4fz-J-9;-TBtTyL66noNmBk0E$x57Th;Bi`Q1J(ui) zNU3JNF4{?5>N%N;P_p-ORr_m>(3*q&^`5&*MlQW~ofMwH#M~B#BoF453+s2$IU2dm z4H`M>=LF5e&S#%S3^VT#FHB63Px2aU_(3=Ch7wS|PQ99*6)YN|OjIO{q~5gs6n5^z zdSCZVl*b5ca5~9Wvre~8hp%9-w^soF*RL68n>0T4$H}yhDT45Pk zr?(l3KL!Y#7~Z>4sO|dWt;N>TsQcmmj#Iv{6E!hUYOz+KA!m7LU$yIk-FS5{p<@r{w8Wz9piU8}! zXkq)?KqERM%@QSice4mIlKgh(11Kc47dFo5aD_jZ@LB)7ySX5_ztZ$Ujo~mv_G~Y& zef>5nQ~-ZFJ2UA_YZ+R(jiqQn@5^YI-ctM6EPaDFbqZjQkHavsBU;X=QJ!05dSXP~}Qc zBqI-}9o(&O#xtAOqk5)uyNXi(ude=IJ+QYmku z2dG$c)w5Y%iT?0zFnrCSOtZP3;M~3HR=@G}zU&3pH|g)C z9AV!>2;L*Aj59c}pGOTY!2FNFC$3|?Smu|1X70+nC z0w%?M>^6iC_ig{?yx_!j52xOY7vV5OJ?^_zQEctXLtozmZ!{DG*u`7%ifxBjuYOE% zB7}O=54!adw+~=dp7wVlhi4>Sk`S{TZYMyuX1){t_PYQ;_rI$QmxxyWIe|WQCY_o= z@=2$e@pm?*sBGD@LvDkJu~?ZOkX{{5Dy|?|Gj&LVhVIP zMd3>gG4tc-`6>WEaf-oXcRo`q>p_Zcnbp!rnO?O^wuEUiF%^4%zFx(~QG2gdI9p0BB z6&T7_uRU(_HT_{BS0+EA%_r)8 z>2KJ~=`cyHQL&$Dc?8*yX8gGa1Kkr2Dk_n}pTrmPN;asyqkv(5A7iL06i80*2K<5N zVBq3V@zgh85>nFF3H*+eT=*WHG(S&C;9%v7LDsn~jj&wbr1riA|7* zW%%n#3h35{lapuBy=kqHvW!($1Dw7s62$d{wp%AXX1gCi_Q*WkF`h$8!Q$RqK^yrh zX`jILZMZUC5OI0jm+_){I8Ubpl6YEXt#uhLWc&3GKY1_UZ)e2Y<)~+Wm1XkXIFJ~{ z`u?a?%T)5+-H7ZIH1E2smeFMvAzuEl{S}CB2%XP=T^kYq`dN}mCjb6p9~O26+9U3L2UhEtR!;(*j{T*& z_V4AZV6`RzNDAm=SfrO7Eo^ZVcA^qi>j&)f-$#0Hui8O{dYM3=@yybbMnTl3S)NCa zXwknw0G%da-5le^8>)DMCj@Ue(8P)OCpqzoP-rV(|E3W9TVC%T4EjevwP%S1?#pF_ zB%d6}#p$PUSif`4N#hAlbq9Qp8TmRU=rrB$tpX_A@I7{%jz5x*8iT_3#3> 
zwkltS9FVj7Q)B;-W3DI!pY1wMPOiZy`J_$>&P4nnCtRe1eDbZ}@neNc#DPG(^EbT@ zA^*dwW4Ajh&{_%GAGW|5D1I4ql>tGKy(_vBCT8~h_^RU8YS{jK=J8dn7iNi8IFjdp z%|g&+&$>|}>M0SBapG@AuC~^|_E%evzu~%24HNUNKE5w82l#rg?D18b=qi|)ZQQX{ zdI!6}&T5?mb`!;M0+qyi$tNi!z8qt;^%n&BWVYIWR*AHePhuy&9bYx{1w2Cb z9eX6Y^AKbd+=9QD{2I2uTyy+vFV2IrJqkH?w$Uq@fP!w(I<(<2J&bB^iU)~qC8Xb0DE z5-gH|+v?;4amc@KI-dd=R`j`F0?`g7@!vj1RlcXQ5c~)$dHitBJ+Am3`yR(1t#>=) zZ#Nu)$mg%>?ll~NXYXT<3%hP9C=MX5{!&|zBu;($4_W^8^a!)ozhu3)gp`vd>J;N| z`0w^5Hy`@u$MP|PWKq{J9ppcTOlRBMo*Fp(LyzFA&-z1mR0d4Ub8Wf?Q}2yx4&Ec5 zw6EX!vF8ht+Ktf)?Fj=L8EH4tGH&=>y{C zUFO!|Vzbu5y|Xkl?2;x|;jLjm&A~;u8m|&$-FNfJp z-n5^3aB}j^eKENA*5YufrCOa3p)4X_uLdS&^#&)+WUvIM;1er5v&sG-Q$g3^h-;w* z3?FkoPMlu3&D)xop@sk=@-w8?-wLP?fcKd1&wkAy=&>^S&B)((XKjX*hP!feP>(`* z5}vJ=DKmB-M`6YbdPI*z?y-wy@Qr){JV>bDsy#>&W@2R-J<~z#J<}1DtNOm9YBp)_ zdmxk7GV={qD!2Aae+&TNNCJxty8_XQV9#;WQ)atBw9*#&_prc>EsX$WHGO(qS&a@+ zL@RyDfB6AymVs9`J)$Y_Kabqb{b2`K&<^>buN;@oPCj>hk0Ji!kG7V^Ax07PW0JlU z!ZijlUX?!fC|~M?5IosO_1L4O4lYnVIi{IZ%EyT^aE+oz9PLCLCt*MfFwLXr*dx8x za65?NeEOfe1rE~f^L0B&^04yQs?oAA00PA`5Ih8 za7?AtvGXdX7urCO9?q9Iamj=!(1Cjz#OeBsW*FUDeqcKDaHg{(pHASa365nJz-CA3 z379@?#95$jV1c+QOq<)D{_zxAb}d1X#H>J#MSvO?jD=9LQpo_b=!!FoRn)99>|wxp zE#N-*Bq(yGGe-IC)kk|@)&i9gPSs|;2WWtXc{P8<0uYeqyU{C~ndFs&q{k80 z;C~seETLmW2%UTmL{aMttQG{E3Jqrl11SE;r@Fp-|Ki9bHaC%X>n{Nf)q z;>G!S5VwEi2T~?)3w2T$dP)WkUxt@vlO1CDJiR)4THI^ln0z(mQ- zN_E45`7il5;C(NSN!}ZR6{@{7`_tfzTVJG7Ac>i?e_3{gcrpzo0#cAoa@@@@`o;eJ z#2JKTC%9Hmjz9b248{iaXC&cWi1`~%Gv15ANl8g@nZazL`(d9H(^>F|zakE#0ocfB zP*Vgs(xZ+ymi<{Ozyfk<oLnYh zG#j!T)BY>69k>8ga({H<(5>xapn8t_{O&?wvWP1h=>5iRl?>%%WQJlqu0U#f6f!-i zg%PwJDOFRtc?l>maByj|5rbN?$L-??M027B^%BeHztf5Pv>8ZgU02zalYFq_?#c3Ks)n=-J z&s>GIAW4i)+7$SY5rjDSlt2+@Yq$dGm0Nw&sy_>u6|cd-AXbbK_6j$*g5OWG9S$i~ z3B!EG99zj5gddD2T(f*dSL?gy)^l2`!S7&=<Bq) z!u#dxK%NLx#;^L1%HR^tQG3uS`4fCrsHEM&6{-Hp8hGPV7_M6MJ36hHO{O4R$;I3hpcU}(1_ z3CYp^a+1t4>^A!zf41t#hk$Gy?fo_MGn#O^w?$e;mwun7V2tE6D^>X;(my8a*8+nY z7Cet0MqdbG9{8Or9&QKxF?4_)Hw^nRdQw^bd=OAV6?`AUha3DKml*R@2p$@;Q3k=) 
z1vPqjSm5`cfDLOD2Ao4rPQJng@4RZ8V=Q=;Y&dJssV-g-1o0Bv1Ma5+lGEd|z3eDq z`e%?(lAx>+DI|CO_v%`hn0I+S4fN6~16_5Oz~P<;zQ-W9FAq5S@4Z)uR=VEq>O!}2 z-sa5GP$DkjN_hsqSDyxPP|V-G5BHt_$D0<(^VTpZ+dL;pPa49}VA1vE8y=+k`R@T6 zR-c!c1V}*8OVz(8&@NvZ-4p;70)bzovWUR7^1rr1kZSAi_rQ%UUj6rvg@gb1e`NAi zXTpc%F8n4q6s-oyK z*ttb~L2P;Q6(Cf_ZsvJ5aj23JIKh*L|KHz216J#tsiuBbfDSTF_|2%^c`p2I>_lQm zRN`~;%~c30`OWGeC8Ki)aW82Ga`%NFrFKh;!&Ad&Amb(sqefKx+YHIR+0Ne5rJt0`1ND+zS>PbG@F7 zrs)zaA=$*D+s5LKW$Z`GK>lxq!~$M`EHi>hQbJPFA9(Um8Xh#LUIIt$zy1A$ZP>|4 zU|;rqwUdISkG&h3hGZ1?YMTLV-SgM2#5TT2MM{l91?t(a2G1pD z6OYu<1 zLHEf#4tHi}yA$_%Wtqx=t2J#20Ri&}ok?r>g@$ETB4Ah!h*r82m-+3X3M=YrVn~h= zmT-ocs0iFz4T!Lvd&IGGTnw2;4Ss8Cy7FsOnol5!BOf4#z&fh2e5`nPeQC6!0iBb( zf$V31fe4(VV9;&RA+UvFQX@BEX|zcQ1_iUi)$TH2kf5vMRF#!A7sMJKba*=Y+wyn~ z2rl+|{14TPDBJs=)*X~5Rr=w3Y}vT)%%u$&;6Xn8K;7i~AVx_)pWC-T#tN9x@EMMk zS>rlMoh4zwXZL3+Gu`}gxQFovzE%tLd_j9E#B2hzba9CU$}kmZ7I5p$10$G*nkE#1 z0|rS6j#U4X*Yn)`xNQhuU^DxRt+h=_w*#HCL0&{2!G!fGbLE2l3HmwA4o%bP8q*2Md{o`)A+ z#tzb@mt+B(W6@*sPyiK8Vjgk}+aS6yIG^~zc5KdWnS64*CUMl>U5sg$2wDvM=w(L= z)4$W(9%o$h{dv&mXT@esotE#5_&4&;Krijc2A%c37NRp>eIyp0A`{o4vRU&ncUAM) z=v{Hzr4wj8h_tJ^i#Y*zF)m6PePym8*+Ql?^zOFT2lgR6(n*VCSERQxMnS(NlJJdf=m<)lURoN0vwI=kuSe*7-b0s?@CLjT$3wWZQ3EHAnYD)%F(wbrVng7WzFdh+FPw@^Zrfp_)o z4-e!M&^Qh9E7LDDY@Rw#Hrf7qJlDJfWA|11#EQ_wQ!lq=vOlNT%l$IkviLL`u8`q- zSmoS$!OOz%Ar!jREBn3mIvv6(Oqd`7#J6%5xFi2w?1}%?BedS%vYVlfgWU+WAK^cD zpag2t#jIm*St2Bt#rBJk#8AkvmtD&tr;D3_sEO{aINM$oUz1u`61n zlLBUA&qOi({^h|`O=hOMtF0>s>3UG^eLLU1d=Or-;cg4X<#OXHxOBu58(m1YH9Z|~ zdnRUlDEx+%`nI24orcjubF~FEXyF8ks(~D($OuQBN|b2s+r*+5^iAhzwAbko0^da1 z30KN(zjv!M!!It4&FAT5y5qe9WZlghNvI1`juo1IVwrt%a@4Nxn@I8UTSx|R{<>2# zk&z#0m@1K=Jy%Qzek?xnX{^0StB|Y3SG(98a_=7LWGj>VV8IKzNl-316f})G>)XQ# zudpL`&%lF`-IE}sD2+~Fly(a@@DFfK9olB}KXgF`Y3pB6kdv zM-5F;Wprdk-S8+rjJ7F+C zalI$Pk*7zx)apGZwK?11g>aWd;mb#1-_|E*G-3P_COKgl8Vh~0m`KH5JmKmVF+9U! 
zmj3llzVAx+wq$8L-rrBGSzbF(NkiZXw@caL2@g#3hIqy?7}qA9;Hw~kuM(?0y2h%D zk}S`Lx_jLBj@D@FBtA5SqP6-Q(#eH~~3%Ws<%&#FM zn0o2Z%p1Sb-F|jJYdl{avL88H&xN@lbE0Tba^IoOXm^Uj;(H^V{SgrjqTztX>mBH0 zX#}^;LD3P&h6ZG^W{9`p#gXs}-D=7=xwOy+I$hwNlIrk`1O7c1)*fM{M~#p?V)(f= zt$dkhq2c##CVv-p1qZ(M(UgE1tCh(W&MQhF1b@}g#V4K-n1R@=_DnZ?2GgPt6X-8SimI*|Q1T5K<`#6+sHRph#nc`%1wPR}C> zO}M&GAR)v{d5uh@XLZa7Zo_%a17#ti5`9A)yC-ssFA z-}1|S`Z!JI*@@;In>UNeA{v+o%i*iDan=chq>bWE>sw2SnaOUn{X1VSc~2=3I#k## zf5sx6Wy1SgrKtkP3+reOTN3M-KJjw*2fmNNbom+cFL=CLZ|CR+_3FLb5AQWSS6UiLA=|P`ZHw21yd#`Y z46sf=9{AnqR<@oO65Epw5o7xyTnU0bVID3Z#Pj96tg_N>5_jFWp$PAa^)-xu|ScwO^r*yB(B zKj!;bm(>iOTFr=;O~w1lk0Zr5)&UNA~|Qo|q!pYRcM z3b>4H-L$)Ws+9`6hv|M4Q-z_6iS>DaCv1Hh@%C~|1{WdKkNos=y&`V?-mh%d^37ZY zm7s>*oV~zlOg-s%*g1r$bL*_$7j*jUV{~^}5^xvwVq)y|d&{H~nJo~bm~gFUd)sF! z3ecM4`z|~1>Y_g*o-pM+6+&|V^pte4I=^90KnzhIHS-h|d-7`4SSb`8Q`!|ujrcH- zcTP9x)Fel;3$4Vq@sTWoW#{as39p3_o%{0Qn7@BBVKKF5iB#ZIyho4fXg^EE^*V@b>q|6Toh4LYdE0>twKuXea1Vthx%n;5j;teZXHf=v`&(!4= z;(_-_h?YLkhiJdq)$o)$Y=5VOga*`{?u78ngDUiRi^NzrQ5_{qlF7wKovO&03b4~V6vhKq{U;j;#f{9O`K=57F zVw&Q){vcxOdxqV3W9P^ZEyHYFs31+|v3l3N3KXB6Iig_PBNK-i6ujX=aQf>cd>l%4 zcOVOLJYg!r56NGjuM&IDM13iFk#Rjz3SDbrv=qeJ2+NB*BVJ!QSZ3{^Gg5TXWqqx4 z6VjALteIn%m)C+hWNdh|p+Sku1?7tD@R`rj=`8T6T0(ILd7zBdyC zb~n$t;Q4Ty=kY+lcS-M5s5QC8~Gx-$#WJXD>4;g(huJDuubc-F7B!(x-UyZSW6oiUwa%ps2h!H?k z_UwP;o!+7^#eEh7Z{rqXfgunf^mLRVpKn2T1NfH5O{icn?*aCyy_t~?ZW6yuc zqXg|)2_fkWqd&dqYt4$5gBmtoAu+b8V;=Ti_}YH|NINQeeP~Sd>V|hDlXKqdNe6jtj2OzIa@*Vg)Z`sjv};H`U!<>-M6c<|;A!v2VaKQh zn2Vt0i3wi{$$_st@H!*@IF{_JSy+jh;<;JfnGB9~YE7*L|k6 z;ZOr}FJoqQL40-e!=vsZ6EBM{&58mfYn-q)ZQx?XIE^Fa89vM;fF^pUHVEtN8zXH8 zsz&S^F7)z^B;5R-zq`q`b@3sD89TR3nXko%^%{55xvSrIt6Q}IBSGzO!pXTe0^U15)Tjzpor%61*8YzaY zbHuQ7f&@;ZLC7=kYaJFX-d&$rfekSe80~sXITd*7MdfSZT6d>D#qWMQUpc$ut8u0~ zH}u`XcAE2$Ny;NUVQO`VV@>5b8=-!W+DDwuP7=RXme#eqv3XtbpX(}X)AZXjoq=BJ zP_CehMOV;}G$9T%fsKyQhAaW~dih7DH__%egwP;x7Fl+Td#qii&WIH^Y_piwo}66o zq2Oty_0@8s!hD!KAgHO`PcLtuZeJGdand;u5Vi-7(e+J@LnAR-Ae7qIWCh?^Ix{{g 
zG=F$B9*Mf0SP9GXdqof&Luzfi8Cn~Yv9?=_L95CV=jz!k7#fZWCL%_fYZmL?YAHYz^gsLx-|G+J zY4c#THsm~=JI+>ECZBwri}Y+YP-26p^<87XUT>|1^}4QmP7Ss{D=CVpr!VffAMUwp zSvPSu9PWE>GCSt3jp9(lA@yO8RNA8lR9CLMJl_5hWAwb9x+%D$3Kd6#pym|LOlH*O z?dGYNb5D(?JA+)>63`L3G_H4Io(N{`lyyL6{u?aIVn zCjx{sw$2z`SDr3Z9QKZ@BOZVU-Z?-@@^@;c zJ>M&IyYY>Xwo)(HtUf~0t?=mmu94@Z6dV)+D{|3kCqgnr+-*a`)Nq>E0Cifw=82+$ z)!_MSK^|vSC=07TVV~VG3!y@w>u#yp3^)gLErlrJTzMuN-lfC0|d1bkmtA`FjtnT3Boj5tJWsX%pX!O45^TCl` z*&2VsZ;#E0D0tm8kjUiO%;ZLJ z=&*1PI;(9}be4p+5Yu04^KHD~uHXFHzCY`W(?HIy+H0B4@WDc(W-TchAy3{1{EEIa zrbwiq^G}*_uiHh{Y4~fF(2H)S$av?j1LgwkHF|D+*roaUJ zjR&$N)-tX;-pVVrrbVK6W~G=&`aCAYwztg=(CMKx^H{#{$rkHEJv(9}0#Oloi=W){ z7B4#z7z4x3fXz39S&8+IC{ouDj;uLKKKq8fZp*LDzFXc#1-#ntBp+E`i(6$coV-}b zx;0|GJMvJsGdhKEho)4YQB;$Orof3&&dXyfKjK!W znnpvQSp^CD2%VkSGka)Cq*mwTI(c8?V zcgRQQUqq+6eI&GdDUFS{MU!Tl`4MV%(KO%%1(;#5h8*=KhSM{$seEsKT#8^g+?!Y# zneS_Y>F21=HL~iiFDqXGL^?WO{hE@*5ezBGA~_1S-Xuy+h)b>F@Zex`1<|EzbsQH) zhAkGvK{@m*}L*bmsc;wM|19;X6Y698`PvWjZK zjyWrXt}XzyynE66u;XdI_IE>HEkDMG2nZ|q7L8GTweQ_E6I0VEd9+ynKx0-N%==uk zTTJ@&E@`H*BWgMD_6-|{qpxD-VTTilQPU*Ym7^OOWpnA7y^l+q<9Bm9UbVmRVw8;o zQ%2~ei)YLVkM~d#PF)bu_gQU0n*$&^=yd!5!0sPP+A5KK#b(X3j9P`^+a1=$Vs19u zYR^n-g;&Jqn6ZLcy`@J)pW|zo=3aKPbV}~F9dxU^_9hh8lO&IR%T)Hrbz4FiTzx5% z){7Eyd#M`?md&lyB^jufO7Eco%iMTi%<*Q!W z_%JzYKW@$KvvPZGeCMEw(DJ#u&b{$NM6 ztE2?0U)Y?+V0gXrk{PYM0m<7|hEf(Rr?7|@Y<=6wI{)#Mi&x?!}81byV zv!tSKl@oYehEi=9J!(G3W-g-DbsHqZc~+x0d4?B8?>x z2C89HFRv*zOY!N?9Tpqy%yVGJ)OJKee6K$Y@(wgD@^I8W|{KcN&S(@hs zip3@Yd#{6P&IHVMyDBHYZfM+ETGnGPv8ZXfVdvsqdZd9WXb$K|*33J@m(n~HQgf!T zp!xazm};Sh-40*U=|uN2EiKfg{#Tcl8%#5UC+^hp>z1~89=KYd`@cws)|I;E*2r`F zZ>Q`zDel+UC3>svhoj!7)2)1>U%ngH+{13@>bv@E@3hlI{aVN#`->qkS~V7rIzhvi z*_>OzRp8LTfE_X$XS%mfrY!co)E;xV`e^O}7IPK#EM~j7#n>_VV6TR$7W9Scm{v(t z_hn4&C68+NnkF0qyfs3kC|nCU2C7Ox9i#h#d!^CI%YLG(O`&C`96=|ju@1{s%dK(E z3*`XOyIj*#C-$(+s{3MKC56{);xY#p{WXV)?w!$zF;U707Q?fHg%~$5bMZ-I{_8S3 z82gc&^*@|CRP{f|6|H=Ss9KZ9N_7fah-ewixL}qK4F_^XGZRoQ3qO)f*_P#Pcp2r+ 
zs;qW=uu#TwU6P&+ps4O#v*jGf(R6Kia;Nsg*@j*f*(z*w1;%P%?J7Hx%R5UskipO6 z@oh2r@oJ@=HOiRvv-BaY06}t1!7xHj!SNN)9sWI8CNJ&V*tYeACDhf11H4F4T zn~?Xj-9T3q;EDQp`&n%PEkzyq} zh*>l!r+{|1D^d^43aPW1->)_OY9zmNQ7xfq4projGCOK+Jq(UGNDGqZ-3rJ;d!_U+ zR8b>j_TzT^J>;%zDf=uYZx2bX7W;o7hAq+`!T!EM#k9u}e;QH0%ET=3KbDP#Ul0WY ziyVgoTE=|;{TmqsCj>Yihz7tsAnjWaK4c67O;zv^H~n|qDSnQ7U)U{!f8vzb!A2i$ z$P*0Efyo>mgKG8s-2?2z(IK`$D_eEuUBD@j9d+KY8G5G)d}_Jt!r%!;k9R{k7ClM+ z`>TPv*Z(KLOY=ZyG9{3)qqDOHAfaazw<0#i-6ss3KVLVMd;_L%0oV`BvPB;NG8JeV220kL`Ds6GT_|ke#Y+R862uCn9_=hzK=RSRP9@!R{=t## z!KFW zM(Mm=Fe=&d;bav4DdAfeR>;9jCxAYH@!PYVzcF=E?z6&LhMwM5f zVlvYmFl8$;{ZeYW-f0Wz37*V2{ip`>w=Ea*sy&Av#RPA=4Bfp6hm_uU+zD-p{EPNL zUC*|V+tRD|soJ)00Aq2*+@}_s*Z3OtQwKu=rhu3QKl8@aY1Q7)IzUy*hV7$s3!B%5 zEt2z3oqh%ez=Ic6gal(eu8z5l+M3=bGxhY z=YVtPCMmF;l0Sd#G4lkyI;`t3l4J`E;?^{Cg#nx>&^@rGAufebGyk`rw zmdo6ikHhqG0Lfg7rj_;x#TPAh03-7*OCuVae7dD_fD{HYiqE%D{?Wpq@=^qVx|Byb z98k^W1xiFP3%tf1!)utCYd^teoNf=M&=@{+B}*D1+2GfKLfa1Jy{+Df@ZV1Z0Gdg(jCR=?toZt1d#D z=7fN$3Gf_?Ocoe>Ux1g*iDzL0c+KxChdF`U$=6-x9=MkHA04h0mM^B}f!SZ?aCb1P zVqOJIvP(0i>~v#b5t#g7{i=0V5p(5qcRj_qy0s3jwI2YUw9X>=5WtxvOq)+evaGGG zqD~h9!P#4Gssgx7!2>Co27sZ)i|A99JQ%Sqq$XVip#lk#!ekr>%^FVI1OlQn+8zt0`PB;f3#}=Op+FjPoo^eT1SAECE!)-1^0v|-VPW+5yez%1nm(F zFoQMe04%01yj5r6W}X{wSDmf;ULK4o_G0(4+@*2%8*blx-$vzR+|s4J8Os~Ah92L6 zKg>{gtD3KSed$^}_#FeFxDSVlOmrOLUdYrDE*1H2?V>{YfR7b4M!>=7Km@&z(!Sdo zl}tC%*{0b)RS4-Z1q=_(3EPpIH!>5PI)Dq803$h(GH3?+)ejk7n`jiX{rARasH3#X z3U1-n4CmQy)IhBm`8|dfoaH;rZZx5NBnl?rFG@WRdKH(P%oqOLqB|iQcok2T{(+0r z&)AzgZC8``zuxDg9?mP*EiwOmm)5`q4A6&IIe~k4OdI#OKPz%5u$94C!A<{hV<$Z@ zmh<@)$TNJx&7t3@l!R~ll8g+yM-fNTm}&q#*>o%XeX{?d4x2pqISEe9JP$D6&|2H^ z^dwKDGVlk0Q}(EvtR$her(1!?$yZ`ArF|p>;8y?demI4>#C3UW*n8C%dJ7PnSLtD7 z4=>}mn))vQgaLVbN?@fWBV!$z| zj^dp>14s=UuW_$<+=~KE&~Bpspe>SB$aZ8}n$Q7ka6g9E_oXb5&S`92Xe6-mN#}0@ zqdzBn;xrxZhEQL&fT939gt^C&s15=OK)EGs40w%CFX;IqNw6dQ98JyaHWk8>!XPJ0{1atMZJ7B71o#vV`CGKc%1uag|@f!nO z2EfEbkkR}39Zt8uzN-vA1%7B{cM2eQ0L~x$7zhxrG29dt__>78sVT7i!4%WcWo~Yl 
zE#H$1zz-OfKJEoZ6Bv9Xz`z{{Km^YKt3LsFIhZQY0oy%!@&xSW*Y@&asxvUewE7PJ z$Z5Ks|B9XP_aArtrU5j7OcaD1%@;q2F-O~LVwXm-U!*9gr|uqs@P&m5cFWg?d$Wn^3mm)DFTP0Mo?iO1Rc6@UF9wW z(V!s+HpV~$zEl)y6he?@!3`CqdtPRX!}M>Ds_yQss#++iFsfgE$$#qI(;&lJKi@yC zcpCEbL(s81lC*W1?=Wv$S9I{A7T0!IL0 zS0P%61)@0yp&=GL1nomZ`yqbl|IbbNOKHl=%DlR>bh)J+*J+`bEh=X?NhbF^s(;u< z4jr&KZ|n`BGgx#@C4Th_l+qYfSw*p>)xM#D?z5LGpUJ8)`&p>&Oqcgo>b&KIIf@SA z&sKSdVZ%e{1-2(hEkN!9?R|5y-~^f!SThKK(jV<8OT%)Ag}wt4!D6B%g*CGRciI>& zpzB}Ai-Y<(26y%?2QP}2l$7iU+@^j%TRJf@p&H5|W>|3l^NT~qC64Wsfw3d`P7NF! zgNBYyi;3KS27Z&!v_Idr#GCj4MJLZ+QrI_Ol`tsx*B~;-@kXcd0*s}IKdYIJkvuZ? zD!^EdhLi~HN4sepV^o^`@&0p*%xDf}zx_aw-G{#2-L2)#kfPH=1Ak3mTz8$}04y;T zu+rJmfLAO>X)bH9;-?EymFMpVt@hg@714=Nl1H9?pz+$-Bvm6xS%!r(BR~uVc&pFL5ua7;;zL(jNrJH>xMJ>~>5^R=;4T)ecnMD<1Qwfn1i=6)Ng zO|ylqS2QASoJX7cEEexS#8tIJ9&C;gW7`9p@mc80xBqwzQdgQ`LyUOqLAEQ$paKjB z<0(NKo0Sj2^x9hc^ZG{FNluMn#|wzP=KkzkpV$O%u`~zrAo}2xAvRCG&zC7f?pwcR zQ#=GudwQ$pHYc2VxLh_Fv3^qU)${~?UzuylD>q2SWyE)=%+5shbUlm#Qfv4sWVjMD&s#)iEju9!`?;%ry}qo2Xt3 zeR$&Xqjw>T9f>G%f3K-$k^xI))`yUgB)fh^AM(P}r%xNhx%F{o)kekkR^3nOn8Lbf zygJh~t-{uillxO{MhlbXx^)EDX)YhQkSw|CAh35@>#~wL9lvM5irCN7FwyOm&&+M| z#P1t~Wb<;j>2wV8CL+XiFyH!%8FDriFitg~VK+~bng0%{c6(X^2v^zO?pDKY(ZW$i z;VWPb=MWK`;wG1Ez8$?|QM)n2EuesIX!}v(WK6mpDZdtUaO{#pPtbW>HF@9&H|xsZ zM^Iv^ykK({eQj> zQ~+FEptGD7hqy;PM^SFF!G9RCY$$|P zU+@;)ics>;Y-lQoDpwvXdcC=(s#pEU{kG4aKL@ndqqht$=JQ*B-&dX}W!vidg{QaL z$Ftp=GVEEqwNz=9?!LRdK}SdTh1VyCoX3@);=4W`sGo7$a{60zf%?K#0)fB`j%l_d z)oQQOra6X@*1++ss|4%dsQ=zh(<=do?yOXrbs#n@xFZKpH88h7aO^;5bp&Qa6;;*!vfwtG*`~Llk(zvF%Xwb+)2{%Xe|&E-K=|= z#-+K(_z@A|Q39C5vQsSQ3xK$sDrc*qILH8jxTB~oJAKrLQ06*$`0^OE+O#-W%DMFg ztjudB&X!BYS(N-EoyRctI0M35Pz6inJRP>r>-2aJA{#>=1Zf2x7<JB+;}EyQl|eW`QL}I1lGQxn`z+x(7H?23>0VYgte4 ztWQR6ULHL&G~nD#gjn&j>$_WhT9W7!Z~lPlAQs8{Zs-6Dr%)KQ-_7u^1#!-M-*ywZ zYLY0)jxV&G&xTYmVF6;sB|k;RVEIXmV>yJ0fJ0pMT9Pj-0iDjAQu2GxVa7&<_$@=} zL;Ama!%Y(NhASB@_|tc`iES62HSG55c_}*@$JJOlI1G+9AbH;m-DLI@$}yCpIo4!a z>A4(p{2(rPF-PkRw#>MV_LcGM_)-8nyCLkvUJV2KM1e!EI0|2gce{ 
zNV*WTTxQm`qYl|ly?fTjxE~Es|BW>+AKwh9UbhlO%bqz5tv8a8ONum?1x`KIh4y~E zuHk^jSbOKo_ZY@*WkMK5b8|XqC5V$WWV`QC+C4}+IhVSU`cUMO6wPHFLXs`rN`3IA zFoagG${;8RprrucW@Uq;gd)A}w4u}>+naQG4^83^Lfhl2aLRF5;yE8k!XyJn2YG$^ zYZN%Lp2lm{3MB^NnasM}lUea`M&%bc@Jt*(9bk~z$_LC=7=(Cog|EjZb~YOVUotRT z!4TrpTz7tw1%zgn&m#DJ!`81jQwg#1+LJUSQyzV6MDrL>^nPo(m@UE5BcCf71<)|O z_DHw;=#-w7myD$_Iy96@rCx9SHk2|{sZ6($C`;YSd{s7~<|KOfJPk(iUs>==K(QG>+DTbpLuJjC$fX3y{VTN0@{fMCt5Z?ddAP!~5@A~O z@Rfp9BqN((k$ra-z*wh&qPUB8Q8fdfDjB)2T;#y3^Je~Qgh%OGNkFMc`R{H6#YY9V z8?OZuq@6nGk5fMj>*cIB2}B-cXCLm(91ygg&W5bo6RK?lVG8rMaR571hh}Fh-{w9hHrpMhs04@?EYIsW8wa#=9 zs0hW?dbYWlAMce1OPm~xlhNsHY7i~jMIKAoW$@5D%E=ju8UhObj`^JrZqwf;8^d*E z)Wf-Sac1yAWOIqr;BKEauq*zY0U%~|cd5&0nr1>sSC{^q>*vvdhqz-n+fx2azb`Lv z6BrR0cTxZr&Wez)uZle+qhb6=1`F+lk!s_QF2K~{lrYP|70!-S`zR?rspzjkZM{4Z z(EZ_g%>igkTzkG;vF;ci=wMVh*CmCWU!C1Pj{OCyxpUpwygmO zNVAUdp^62kw`m*dDpZ;2W+Q^GeB7=NgG{D8;1n2aAq_>DzAOr@JnJP5(U-FyHPh~s zTLzB77(H&joY_s}Y!xO?%-9OH4e-tgLL2`X+BA)3ojgco@>@NtU%3Yz$9mk3nXN$zrl;zzvXM!F*XC@6f?91Dl>n@~O zPjI1*t-r3*23LhDaSr;x=c$_3hEtBh78FPv7lEAeuBKyrLLdYm_}h&5=kMeLW4PKL zDuu@Xw5>^rD4oaEg-X{xl`{J5p{Sjw{_33kyR+wZ4uO~2+5LCth-A`` z0O;(e1@wk#C7>Dl;D%uv{2Jsl{UJ05!oZDz4rqEwZMH8@zrv%C#;f%NiXGxt_Umzv zWWk%sF3XFcnV0|z{?jun8}$6GJ^s^k|G_+(b%0aQkCERy1yp;a>AN!B^yT-w7c9_MRuKQ6Ifuy^Brv?Fp3CM8Ta>q0T+)8 zYGZr}NuD*tRymvfE5N!fMMh3cSfOsq?@>)4z-i`frkv#aXQj}<-1J1L&Tc1DnXGEe zAMR<|_xk;J$a$b>)_iZXw^LhjhuEgrtNx}BU^<^qj#2S9!f()K+LuR&+PCsSH53CJ zjs8O0wzua~fToES){U34jZkQk1g15B_%=ozSd~L8l_JtKeeO4C4BWq+OE*FE+aO&4 z$Tt(UWTAHl!C-fp0Z8pX;lI6#bO8$mAcm`@($Va90`uJjmb?G%#^1kIcu>!9 zU>irsWwL`@hRRkGj#zCe?5{hx_G!WQJessZR%tz9n#o|W78&&-NLRo+D4xdNwft4l|F zXX>@KBlF9*du#}DzaGTNPL<~W^k&@?T4><3mit77TR`Yo>?H>)nD0CmAoW!wl^OzQcskA7$R{wo*pn`I&uKlueHSdL zLyNyeF69n=3%Dvs<$_+iyKTeK#&Il7zwxHx>RfrHn+O|C9F-MJIIqJ!^_^|5@v|Y? 
z;Q5f>mJ={~_zzJBWX1bg2!W-u--CPJai+U=`!}joKFuPCf5Tp7u&1KRBmLW?A=Pqr z2BqJSv~(Z~Q+c+O=48fi-Z53`cITYd0UW`_0ym@tC1eZ@mBilKa`ofzigbzU=$IjYC}!x_Pn_nfJq}EDteSj7~N32wNMzT&?*7b z>U*ty1P_hHL-XavRfqPysto0nh+(>bwrz>mtwcDX63uB6^SZNJeqnDb0eJg(5|3Qs zaG_n#DQpEcTV+@jWLDy6=<2$v%4yn#HJ16)!DBuLK6W+o8ZA1AArDj8FbGji!g*l) zfMCz|_BNA5)yhld!i>#cIj+rZ5yu__#RR9El*4O;5f0J`8Tj=Fex=CRw1kLsjdN@&V-gaCrStD?yv zIIl{h(_^pk5n>@B$H9_5nOBH~AAnoGo%yqmvTIe<JL0kqHX~0A+%hXAqo7_G!7O*(N!!#!EQhEhaF{dunPyJJRvLABlmHJ$@ zA0y&7!)fG^hSBfu1XEczc=oM>j)B4Yvj1KskY^!`*9UMApS0e> z(VmMPU}<+b9pRqOOJSnSRRw&3-8Cqmd^X^CYUHs!U>;LI0kaO>MQ7Y*I_?DF@;1f; z`AVFJvm#gYslQV-H~Q8-@arpMS(b(0k`!YxUoIm5^@&jPhtW@9BB~eS{Cg>{Ph95D ze9#g<>$w6(r`prTPqBpDChQ4@e{G^Xn)Z`cpWhbM3W)eUR@)O0RsnYH9F;2FYJ5|% z7D738I#fYm{k?WsNsqZM;8_&gb*9}-y}3A0oNZY60QAt3Tf7K3AwCzS%@PKUcd5H2I*x+nm+LrQr^1c``SD0@byisQKr!@p+GW`=LTLmluUax0fAJ|C7_c*-PhkZ<+;Pq!YdE9|=;2Vw%TJ?8a_?Yn`JoXg%hvL1jnRtNmRM1v#55#?bD8&j{daWxeH zWiHuw{RC*#m8s*oJc4Cj(>O+jZ}j))?=iMbgZo(IDy?EERm4^wVv9_v(_k8oZ2I3V zymmJTR6SoUbJ_B3UJn*fq3PbSIqw&4BCtA1OHQ-v5bf2py4!71Z;TmPUNFwL3y7mV zm%8X7=-bg3NX$u^J{P!8Bi%|5b`yfG#!*`y?SL@wV^+i8+r#DmW-qJ{o3QEt$9^%vLxU$JtrWH2NoL@n&toqVeX zX2tE>d`Be?0`yfjf%Z1i|J^H*O-}Y7dqV%9Ir#FyFHe2|L42csea>6ZEt?#8zYehs z6c)e0$xK3A6Lx?PLsifGY~z^(yXN2jcG9e8q=b>?N#`MSM?g9zJsdjxf&>3yXJ>iy z7i9A7$;6$yjh2+EV>o%TJF1Ypyo;x*?zz%XWE;+@Kirz+Eb{XXLL zj4C(J{|u>Sd6Fu0Oxkd5ZrF)qVrZ9IQ$7eftC=z9jbs?ZKZN=2ZZz_E9?RCB{|rG| z@3RQAC(*Piba{^Hm@H;m=<@R{OnM-6I7|wqnr&jpw;y_RKo_pf3%G*u=zm_SeS z-<>}_BmIhYS~5YMhOV+@Au+r(mn9RVXWK=Ln`1VwLukRj21G1&r0AEtI7E5{ZX*BM z*un@_9p@O%H7fvv;%|2of3e_|+|qE4fMx3R+B#nvHz+u=&4iitD{NlpV}$aUjN#g} zuoI_ke|P0_yuT|Hu%O|Wai-sCG`be|CiG|glHJ(QhwH}f># zN_kQNP?g%jExRMosV=b`NXw{y?-yF=V_rA$B6aeaTzlI@%UwBNFUOS%kH&Brw+}ZV z$>&&1`V;8z-C}#>(v<|a5pN_jG?hc`Go>fXJQ(0KaNoqg8dH`^w*sU835Q-5{#pLC zd;nVss*?IcUIoBr{s2-{m0NaVpnKi!jtl&;GB4h^l*^^I<-8U2^k^&6O#I|og2Dkp zKr}*kznC+#{Ox7NZ{afWijBcSb34#j!lNFd8>5me<<3K)97|=mXA*3vs(gg)%?_EV zS(}F6_IU{=UA#2pA^2H|7VfDlB3{x6LMZP7{_}kS=ilwFUmR=xzdz)d!9j;?!%i;u 
zA4-V*fp)kdT23VvCr~W z+y_E#b?|qMWB3BXQ>KY4ASy%4VDfO@d(JI8=!GvJd_tb=vRlUu*CY^qf$bBYBFVFt z>#8U#ShnV9_9c4Cjt7)!frtu3!?}jUgbWFPIGHSI+au_xxR*1UirO-WWCjxUgvlGO zt?JC){!;s`lmlCdwnF=|+mP$b8hJ6_Py7GSyEARpQ+DQ<^@zOt_)LOzc2x=uxR?MQ z`B>!3UgA9#{K<*$0|ASj+N8=8s3LFvA``84ZN=kEoO4O%vdrM*&(Kx=Yi4aauZ-C* zPO;5^woaRmcf{3sY#z@)5KF&)*tthEDU|D4 z|K7C{*fo(bZ)j|!Gu85$Sw_6-hY|1jy7h&^K6FP$V8yB$N;mGG-Rk~W`Mq0dC*Q%6 zMpe!;ytlb775MxA-N+vGK5xDc!xUTCo$F52r_^(E# z$ECvPdGlh7OSKfyTcl3?=wRxRuJjTwOzY7d+6(AczDPp+d=G9zUEyDYt8Fh`AVDJq z4PJ|vCS%Qy(66*6H3l#?Mk+QUA-vo;j$k94eNSCuUFiGq+{~h9%+lgDmve>6O)ISg zT*^KjQyf|UDS!&=1Z+JySajnAUK?&RlA|ZqLko>CQWs{Jdc~+t(NbpgL~P2rk{B_a zLt~`*@b;8&k9jKr5DN~gJWHB47NGPCAv`zk6g_H{%~UuH-51{`RoyDrmH4b^DN*~o zD~DK^hIsJ&xt8O$Zyn?iVd3&~85fq;eswlJq30U)PreKy;8;sKIve@JP}$jkx?6uS zI!`7K=D!pvPOhF*jpLC5Vg@feB%$kn?SrAvs_p<&IBq5SniflwUCJOl!1> zZ6N(xbPx*~EApRh>K`aBTlVSHZCAO^9B-eD?)xyIDf5J8LF28Ja3v$RaRC z>q?}YsT92R+$0X`YMr8wE91@A&WYjkxy9$=;xuT)q-9)k3|C{16%#|3Iu-6u_V>$- zv`(in;HlOX-=p}SgEOI>+tFAZp%iCR zhjOE0gS{SX&)89kFDel2BUMBaIKvnl|xHi1~73V8q9mNekRug>N^T=g|2;q1E?qm_}q?l=F-aMEI`-U%JJk zP;K`bg!daeQeB^E08jX3K{iI{Xva-yzwLG4EViepOBm#h#XIC?f{X!RLlmDsg{I!N zb^GKP7OHWNy)k7=JM3^X1sp(DOQK!PxagtdR!i;nu$*`}TGjXFoZGEb@1O6>=|0CeF6tzx) zJzZcND;f+tKBFTCf;PN(ZW$v*Yz3E!U4*RQl2R8XcA)6oWCVhd)@u~if_cs~c?G6o zOeNS|5ue=w;KenBunTD=W+27Et);kWMc<3JDp07UX8c9Fu8da(a{jx%PG}v3rU5@p z9$B=})#~R{NT1iY^`@(v|E7)+1aci3c@{Cg7d~0^6T5U=lT!Oj#9Cjp#&AUk`}g~*1$=_sLR5Z(zsO55lBC4vW7 z)V09>|Hb~|yknoF_nVj~F}AcCnkm~(g-o$Rr#_3FqPhEBsjFAe4Otfs@x+fUK?SVT z?xRn8OuWXQbB0z+1}+TK>IwbGGnYH*xJVhrChH}IWM7=iVlK#(@_InTh64Z3pP*!b0C%Cl&FsbkYY!I`>%oTjd}!u|w3&~Xc*V8u z9$cy4Jr7~=mK_TB;$T947G2(zXJ6h2HV)@&^SN*Q>ZA-Qn!SMeQid6q4)b%{n8G~| z*q!52^z#Kl!^Q|6<9qiWFBbRqCQ9b>ZSQWmwzFrE=W#u4nOY6HCXQ7rPgfU9^=cQkYxhEv9vhW(&=?%L z{xdWES8?sm0}>1Vs@B&=WG%ww9&IbUcP`(3f8Ic*<}v&P>B)|U`ltPQzH`h9Ken1} zmP}G~MXc-gRgW_1q{k~hlC5=QUO%ldUv7Fe^(?>P6-$?D_R!S4Vu{*|`wvR)KXf6t ztZoj-X7Og5f# z28RPr+^yNWCg@!8wU$8hO@umw^Ny3vY(VwK1JRiK4kISZj9Hm%7MabgBYu^|dA(+v 
z7FhWDJMW^xigW$+-{aUyc@7ev1($ugMrhsqoK7_@Ey#Cmdz-b(_;4t4CIe1v@1B6hRS?J#sPPxy`ar7AQ6h`5}Q=^6h zRsEOeMXkHeR*!0p>|jeLoI~{9KV4n<>?)v>-nxd}Sp8C;9B=y+QhRG-2QO{L)?ycu z@^G~!trKtbkKlz%yJjVx+8@W{YM6$eb7^y#7!Uk{6xIzHWO%VC_S18;7?~KkQM(r> zYie+e(xlFNTt@AjxHRtN+ZEIUF|_K$Kx8s<+jZxC;uJdHdeZrd{|mP}`pBLptyTXU zkFS(nl*O%zTJ7R#Ti`zYkNS<<*uRXreze&N_-cQa#$aC~sW-tqU-t_{-(B&z-KWDj zsziIQU7fK6%~gvm-sNW;@ZJ>b}su=bxt}Z>7Wri-m2}c!kxgw`R6Gk*@|79 z=5Ni96~3lz3=6KD-9G;9j5pLgqqkqq|K5g}6QQtjDI51a5<;IYsK#*bdxUsZti3O< zz(&JlnpB&FDj9i=c+IKTQ6rwnxyNn4JX&;vFYaSB2i~YH>P(L&>sMj)v}XCEwXZ#y z9~-jyJia&&TL~<3s=a})ZF0<|+0WYbe6Ogy5~5k4SYZ6+x}1MkhgRuCj8O7L#jdw< zy@R$&7w`MPWf;a5PGv{I(Ym(#2GFp^5O$D7qpJhF29)lcCX>aTiDdr~gF?X`fo>e% zd_H|%PXIk%QH=lQIp^;5ZMvzhSjDCK{?4l2pt^6_(Th=GfazotQ%W_EA7OGvjHW4Qr-j~WD z_fu=F=onerHey788y0p{mf5E1RKHouO-99ufa-NM5PkRn4yYF9d zZ##oI*HUEHDAiW~qQ8yrRFdn9U4~wB9HlA73<}j}A7~$&gRy(m-HCj-$LgiIu=`*+ zt(A;)X2JKX?q-@G3;`8NcPPDJ*uj#9+~&ti<*wN(P+PnW&nFQd3`%rneny1@BnY#rsRbzF-7xb#;UJ4uIA~dPs?=M6kWu&=goWSy7d#1 zhsn!0#U>G4eM?G--bIq`%>F~)anp%@6$AbhkNugEaLpTo*l5rHF(|q4HFi(iy4cRXl*{p20Ti;9B^TLw&4xb~6JQk=qtwpHn3N84o6#?x8pEZ92&Dc0(!t8t4pTRG2L5+*U% zlDJh|_vfL^P3p6%7NY0QwG`O&Kpy$wr>CxaXt8%s`f_+*9eP6Fh@?4L@~k2(EqTPZ z^Jn|#O$=tH*F2m@?pJG-*Xb$`x2gKz znYC(4<4CusLMY;^s1R#7Eq!CFchnEL<+tfCn*;jFaR%oRha5+n_O|jYk+nkufjnuH z^a$v%9>RJ3v8qMg5iZ$|%bP#@o_pB&NfWyH?>eTU#^UpWA%(?@)rBUSJ_o`NSmYPh z_*E+yNUzGJ#kf?q_&N;VScEmBoCRjJ3Kuxb%Y4gsO$JQ6NRc%=ljBS=l^w?{>Y;l5@BkCWYBw21_6PLr3d%}k?OPBYILx`O}lR49`A15d_Psw^S2i0g+3}IXtxJ2=G)#|mi?MA=VMdj>dbP3?$L0?Ol@m` z^i74EW!r0a6&1jB4WGXwZ-`56jcp&uCKei4X=i`Js4>xFjIBMhSY{HRE=Hmx77LOe zrr!!ip$RA6U9#Pc&d!`nzt8sMG>>PZ!rDYAo6ra1mIcUU#hBN?^>Xs`GMPoYEeKnm z3;g@?>a}BSMYP0>1();}yffu@%e)6Jd&^#b6`wjRb8@S5*w4o#ec&<1sHn+T+iwfu z-oIh~I+5POJ|q;o+mU-M=``AA4-G^5d;M#9*B0D^jHErCGtHS(pXVLoy04k+9^I?C zzF1lyF_;rCl|?*3QZ`dDyhu`?IkDYmJ+r3H*Ra`qaEwE2VzU8NynIFIpoO=N*~!TU zPExVfDZ|Js->Oz#uV?(th6t_=6z z7FZ%Y-d*mZJJS%F9`NnpnDg+j-IbuLaP6Y;6cHM>m;+l+%H}pvMUP76y2F_jf|7bZ 
zw@Ld|jl8bn@vj;Y7>G4Ds`Xeqa#tALU3_ZcS+Y-SU!IcxzIG1LA3pcF6^2guf8jya zI+8;bc53%H=k-Smqh!qehfVZgiBoJQvhS=^i676Bh2};TozYhcL--N%M91Di77ht% zpN>J-du~7JW-^5sxFPe5Jv-nOp zoOteD<=E3c$h-0OqWBHu!j!O2z+mQ9PmY}!umVT@ps}*FWN=CzC>AXKQin6_zb#%+&4RCI&1JyKA zhJ9QX5_idh2akX7tmsqe^Id)JowtMF{elRS2qH)4vBMO>G1o{O7KY^q1xi#$4CBtz1x0D`QBm`apFZkU%m5&8xdRt;cl_uUke&qPqk=> z=MRfrO7`QTNF6&P2Nk3j;at;&s5$?w>=VnHJD0u*(iq5vvW0Vm3r2o%lyDxSX#S%W+_eHaSP;T!M_CyH6pvryv)9iFU@T|u}bdSeP~~0G~%L&!rHS5 z8fZ_M*to1YSj?qy;OCfYos1on4(avokCE{qUsP0QscaxCk0hG;o-dh~MchL zV6et5o^*+$4BEeoBoalsf*J0~5pU7F$D?1n4n;UREz!^lTs zjdw(n(Iqo6A;z&Pc=|x`=9Hx6@q#_?Ae5B{e-ZJaE|*xH=JT6oH+ZvM7;QWJ_J+56MsEli z$~-@(^Xq(pZpJ%GP+CpZ%bT6MG0l#Fe9o8(6_jkHGCd0agx-kB%D*?ybmv-?TJ3a9 z*{f*b2Kd%OfBprxPjs#VNt&kho$R2tL)vUUtlgwF{@| zQDX(lP+u>QjmqUYZbKsxcV>f2`aro<7M&?j*(WHfyjE)N_dVs6zlX=%Pk-UeB59{z zyjKlK<5F5en3(bR)w%D~siOXCs(xMlIDa7~&PI=#wVOlcuLcsc2){b9+VMWW zBwY^kaFzCWdvJMsNSFIIi}j>10(&s>WBbEb|2?lt#1#qNT)LHHf1{n{8V57K+_K9{ zmn8_4uxV27f~MKWTjHZghlQF`O~tPmjs0pylvCR4MiGl4JNfA+sR*Q2o|&E0l%^o5 zr4_Evsdmrydnt3CxxB9F4BWeN(|&TTnWP0ycr7h=;SGigq{n8x<}&*%brNLe9oX*r zd(4T-yHwj?jx4|6p4i_<+qbJKXv?iQ@_jAWH2e_v1aEDX)6nGU_--6_se z==`e~KUackzZ?PV}(zL!l`6ddZ z*8EBhV#8m5eZXP^`~Je$PXjXX3sdORupw;12Fa?JfJKpWcC!u*j+9sTe<`v{p??%7 zR8>4W+`r(^fT>2H`hiP|IkWNX$3xj}1`C>Fc6V(@SS(+M7`t2=J-)l^Q*Hh#Qu@)m zk-b5e2YWDj0~!B~2CmGcc4VJqp^llD?+$WD8;+ZjF*SXzxp0WG#52zsBYIxEyng$t z5hA2ox4jEB&6%GVc@8)&_1L6K%~k!mra1)jy#macug}UlcF8i;={MPd`enKyfM&G# zxZI|wgW5B^YkuSA&-KZc+OOWkUw)qxFB6f>-yCpfF8jGroF^-nb6l?OY-W=<{rYY4 zMJ^pbqwb*@XH~N$M#dCFF&H25Tpe-B!g0^eelxF=&Wnk%GcClnNYbtN`|xW~^scGF zd$H@I{XXOoV?O1S=O4pQ9N((E{^ykQSXQeL*pWEi;@GMOnhSHRATXQ*T<)p0)Y!)1 zt^t`*gG`rYR=>@2J%x^OB!*OYkUa>mFIT5~W(gvSladN}5N|gMl#k#-?)iU-*lfBm z*s1+xcnIA8;P0r+eX;#g&s_dI9PZ3o$+9!B0v#+dulZkWd=P90sr2;rjsz`2L zV_z`J&?nu5qq3cxV5D=eZK!-kb8A%((W;&&lb<3%wo2OhAZE%nSlByo+_oP6e8*4^ zCq25&sadIE*LEtNeBi1NzAB2jd{IVm^XyVukFf3qMTJM?&WGM{(?EH8Kj7vcRI#Xg z`JvZbW;9dl8p^#_+!CV9*$h|Bi&y*hE&Fxc(2GQJR(m+0!p0fKSOvbvG6^L+4!_~c 
z2yN^FEmM-bocFK;^kwfwAkAl?Q%O6nWxDB=H?aTOG2J)Lrj)7yr zf@b!&lDH`Rb19G8b1PdDudcA$O7R%HNmoZsv4ot?Sjhl;_ykwhny>pd_IKEUxN}_k%xs7wAwdu{tzAwzj zKT!I#zEVsa=y z?z+SQ_21@5dq08U*0CzVg1kIQM5ni~qksbcVqN4cvVgvaUGdrZL3y#=SK-+_3m&ZRn2Di2tbk z`7)E5U8FnK>(fb>5FV?o>sZaWBc6rV=ytAj-Y`m#&W}|XXcHJjQD)&}1Z8-p8e9hS zNIiO|AZV$vZqh0^QLA>STBn5A+Uet#ZgYx;Bwu*wr4z6i7--q?SI zJ$p%y;h{ouBP(GvUBVN659xw`F)Gy@#jR zGb`pKXQ)&uoF)2>QHHF=9wrJbx+G+$ML4`Rx82*YolmR!R_yL;_O*Wux<%HTTa@j?$5Kvo=3XwZxhgI1dCLE72bZH>{BYq6QYE zP}`$>{g$7<6QYHr?S==c)}#BRfMx6uX1QxBv>sAkh8sA~ZYjF!`9k2fs@!-$-{fVx zXKc9_ZYpT*&6|hoXa#ed+`GTKGKVMWo1L#c?99incgfBwX?utlAI{A!qE{dO{UZ-D zV`S2iP4F!)B+B<|OA2Cr+%=)73P_p8Ms;Wg;M}oXLb42wDQ;r+`y0i*BKwRhteHb> zEroO|z2*sgnK6hK{a4yqEDl|iw~GK@c)jr3R;3>!u4`*;*VeOTMphR?EQ{Ux;p%+F zH5H4e74v&?6V83*UHx`7O50Mg&$xyer{=evU&EBUbSCKR-5s7Q^-I%^hq&Gnc=csQ zecFaD4D{V0+tr zKJ7}~X#c_pvU$O-z0O3Bwu^IK_w|>R^pW}{52Ys~=W0!2O90=x?e0D(2ZUrXl0&=^ zKuMj(%%>%M|BP*JIDSxA&@|sB)D`&ZhF_N|waw22SAU;r!KRn!*dBK2@0bIjSWdHJ zgVxR+r2H-!(-&9Vss?w{HyVYwFO7`@l+fw?@E<&w>vW*lv!yJ%7Gw z-IHZgGoM>?K*Dprp_ymv7(wK%Tkuu5r{mW3P<#OFdP4PPsOi^K4H8U&Wt#9(`Fmvk zVi)1dngHT6;={3i701C#&W8(LR$%f@I@tW>4;BII(1 z_!9tap7yvWuXYbf#)zYnhhnh|LfF`I)Wdd7(saa=*b{!IQPl zy?O6FbQWqt*L}BL=dayo7p{jtMP3y%Y+#=KK9G%*Zj*dHev}$K8s2`z_Joo4rF4Ge z**fLzYy`(=ZMArLpA6He3uJJmVe8!~{ubMx_i4LKW`llF?Q6ndDi1e~?op}pOo5k; z`wnSyX1E5vlx7UX0Aw36;D7>*zvRAXY%6}(MM=YIuWy*Ss~MQ;)MET^AjrO_`VK>1 zy84s_OpBJ}M@w-IH8NcTTnW{^)8>;xO;bW?fba6VxfAgLu_JNr{@5;v%`m$_ktbve zK+Yj3Zj~kXS=S0rZ&`u*>xbQ0(Z@i7Y67U4o6Y6{cnm=TM@aG?Ve1 zUhPA1ZQpXUZ^G6>1BS?;z(oq0;RR?u9*PI~t_xyFigG$V0w1 zeB#REhoHXBX`U?zFcuVcy^9FS2f@YhY3X@RbKmz!knwkx9PFfg3l#bB+E%{#3i4G! 
zNt>;)OaI^X7{Y@>2O$ek?cyr)r>agO`|l5w|CG_;f_8>?cea|R!RrM(lu^ZTB?C~N zJojLWyeP}S1i1lw32e}7dwmrU^SB(s@y?z0;$Ac3!UrwXV(eYBYUCkad4q_!^VVwM z1pr>uulDCmoQJuk!*C~ISC@dcX10Tv1k}PFf~-kDznVU7jI4Xw!opuwbZ`Gu(cKNR z8?Lw{Gy$Q9z>)HfK;+*CZK3kv|NcfpIXfOa1;N5r0<@~Pf)%jZ$i?PWrdo3Id~fv z)3fwGg|GRjDC@1$U&z5nXv&7xX|G58(<%AvNpy3#B6{k7n*riWgKPLzTn){c|Nia& zfxbBXCllyQrn>WpQ?sTSWOPoCm(piS4KN1R>|2c z?D{`epc)l}5=L(4viG2%8Pp@R(Q%r9v~aFsdr&Szz2*jQLx?S1vcRK3g?pARkRZ8< zAOUbUo*?6*@ZkHg?&NBW-{vgHyLKEs@`cZTd+O_J8^u9&5aVJFQOr+psWBY6zoo(p zGdVo@C6cqY2b8Ou-WCPfpYOAaPLjC{yvcR$j0VU<#Nca z#g0@B&`9O$%WP2hO)ZyiOOz#o%2zq?425|4)RF!@FeS(@-|NtjNK*9Q<#wq$Dcl5d zv(tGcxk2{(jW@EZqvt@m#ijf|wX#v!o8a9FAZd|`yI$ml{!?CBm&f<@*MjtOkb;{C zb~5Tq4DTxeeUD$bA7oMt>O}#M4I>7fR8>_0D^mdhJY;cJ-v&obe*laXOJ zt|%rwdI3^;m|0im9$XAEM+0DawP;=HI@!n}W&~EV2VNR^cF>|kTbv)QbH=Z>q4~zv z?&x%^xj16WQTam~FCLTzu*)SDk%3Ef%R3L7f&O`xDxS6k1x;C}G>GVmBmtqd2+f~LXy2Buhcxi8>M$_&Z?CxbNa zN8J)+qxvRX?XHn1$kqO8Pk;Rw99fisoqJFMOp=K+Mt**~;Gv2>1;mH9LvGDtR!Ve| zgF>7v_{<}6k0wgDLnUf%!)fUcAKj`iIoWQ@Ja$~_K~RF?-gcppPrE9o$MT4&=s`Y( zDo|XUWu5_^LFl8fGubA(HUfmp-c&uw!F-DR)>^qx4^-92f(q%0V4-gbU^x}nxwg>O z5wdCv?}DB?WjZD|380&m7M97&qvqlu(+bpD?4Af9feM>%382(25Wy1D zH~cC)4Wz8{1Qc6;Ib3ev^zvlCANYLu6bc~k!#$3ed;))b=?GF|#H8rq9kXqvi*qcS z1dNZ?y&K+0RDLK`*TN$$P29eoX_pI{DeBakAFh-cRNn-ZgSEx7n?F+|DiH&LDjyiMjug({SvimZs0u&4h{}KfH6R#x@*)fJQ~9TZ5F|GNtnxA;JCck z3Xri_33G8j?fL?*OXPNSJ|O8TUB*B54+_z<;HRam>KRsrUL_|>kYCugCT8LyGJqK} z-N5D}srI#Xbl7uLj7GOgd=>yVz>D;yHIO?O`~y^LR(Ls$q-!S!2u<{XiVW3&>|N1R zr{hXVJ6AE+l@?Y*Nn_q2G z<$%7Kw+*0t!`^k3=j-rSvkrNvBl+||FxEh>EJXAF?QN-kEGRA^&l^Kc|Ifr&3?|&= zNKgPCCFEdM3EWe18+e2daH9%v8DROF8-)iN7{6T(@SkmVR%7RpH^4n;A&cF5fk(-d zzPywQJa^~QKBHCdcRrtIRr=};unq$@r0xO>;oN@S1K~h(P3wT(1D<|h3*4mn>68;t z=dB%u$)E$6%HQeeA5mc{1*T-si9b`r<22cT#~XYFo<;<$0gW$Z{5BCxI{8^;ttSx8|JQGKnuu% zm-$RoK3ezr+1c4oSEbLZbbC@*Sh#VQK7&2bo7Zk^%?=0J3_QK%K4_yH=wzGyb$fxs zGQh1tA!{NEjx2y7+*8)ptDfH+L3`Y>p-Q*ys8a609>h=|CFx{c{EtHajX z|NC*+?3xmAG3%z(?ma(&b@L}N69EO_p&?Sh<7H%kTaJJWFT>WwSSB870WQ!yIa$4X 
zo|^A0ov1A@fcxcc0(TSvJr=zsqwv+r<^O=ylj!f`z@)MJ<1y**wNa^WEID3J^45z4 z*2POaC%?MB-X3^p&RL80(WTbShaFz5QK}@i~E$ ztM;{R0Uq&mT7Ul@;DItpHyYXHG|DDE<=a@ZxeDm#`k$xck5pXTpL=`Tk939y@<11h z-dcW?p$WL3d1n&?n-kEAB34G87B1jk1kz7E`|zJpVAHK`$(gT38Gyjk)z4*}Q$iB} Dh&v$V literal 0 HcmV?d00001 diff --git a/assignment-2/submission/18307130090/img/SGD_normal.png b/assignment-2/submission/18307130090/img/SGD_normal.png new file mode 100644 index 0000000000000000000000000000000000000000..e6f3933e1bf979fa7b3b643d8f7fe823610109e9 GIT binary patch literal 17942 zcmeHvX&{x~_wRFz$y9{o7)vTsQJJZPP=w6Gkw}^6nR7~I3YjZYB~xZHa~vU4NXCrE z6q)DwoOAbS`2KwV_x|s_Z|=L>n|Pl6tiATyYp=ETTAwB8wwe;{;S+};2%=TKaa|LF zNKg<2!yY05Pb!L3iy%m0So!+ZJ02zr!_*14rtpo9pPpO$8#(0#M&Ay4e|z?JkV~h= zSYFhb#N24RZi$Qu7m25L)Ml=|b2|I3`@JUIeCRVylJ{)fQ1h2vxsGhxh2rFD%P%8q z^BeQ6(>O1aHG{p<-VxEF-Y-j)y(89n#`R$8B>s2I5OihzC?+avEYncT#Gv!!zT}$78D0V5W~}%S*w(A zPB7Liym`cQHuo6BmD^8w=UrA-sI1>9T`0u%^yQb^AKD?PK89>&8c3N7f9i@H9|{Jo z@WFN^fb0Y1sJ?bknL>fL)zsPx?lsb#d|iKnc~0MWgR1k_^ZL#drBa*kkwG(zbNbn~ z7C63N1h{Mff7ZR(`Za#Ik!o*xcFFe~#a9^%>>7w)XWLG_X8TR}P<#`Gqq*BM_Nptz zyDQn21tkrV?ktRNk{L13k;_u3m>XB3pBN4P8En|}Uw=KHdMo}iz4S85;L@bp92WuNW0u7kF{{RLL+S2U?usnWz@ zRX~`F!^i_{_*m{y>^M|gzoNc3fdFTPh4urtbA%T_;hLd^HIzg zt91W4&H9RIj$%5l%rg-$qVBPaAIUj7D{>o_04;WBYSlfXt=ahz|NQy$&4s}bJsG$8 zml+86VOL#*@5bs>tC-!;b~9gc|M%~Ch?=xp@j?b4AFTaoL-?#TJZBY+4sd-Amnd{w z7-*l$P<(!TY{+LWr}Tc?YhI4c08YOn625Jh`ubasokxv&FI8LheDGcGREocB%Yfk5 z$rkt9_pnr^ge4q0TvPjuCIaTuXH1l-2G7;{_cJsaC&p{nEGhHFN0@N7`28KXm90@E zOJi*mpE`5bbTSez+9A#8z}Z@X-<|W`m`yz19SAABveXq_(~Z_+;XG4L~tltR29G0r-#x;`ki$SqfjCMXG>wrb+Delu)sNJ}AD@QXmxahq< zq(=61)KnOKhQMi8WBiMW?PD&oFaevv65I8ePU8xfvl{Q}v+~eZEDdiLwpS+XYjB&9 zysEORO()yg{cpWIKlZUp4NTbof?K|R<)iU2s&m1m!*227;wIHz8wnE5*Zl#JOs3jLOYNj)0F*v(#Tjd=?G-uwLAxkF}aXF9nwWPDlIN9pLoW}Ew{ub4Sh zvpHZrUe@bLrB7C}cFYARJ7Fap+26HusJw)Fv?KH+s%~w-S_EZZr9oUa@vKds%5G1T z3PJ5}4%&-~h)BB3U=yD3{%B8t2%XtDE`sb`CNw;_BdL&Wr z8AKRvYK%OKj*UI7nd7^+**^GUv2qDr;X0R=mZpIo0&xs;ziC%dlPRBXKM 
zX9tJDleR{pU_L$Cf2>nr9kqo!5SkgJ}TE$%0O1 zQ-T64dj(iAEG8Mfuv0NFB2Z_j{U&EM%qG3LtS&74%o0|(nj}r+wpM9wG=Y6a|1k5PqnE~aCgfLFI zT;O8(r(Rk9^Hw+x2A?J6he0D}f6H6_ei{unn5jn+!l?c+t@<@46b|U*cq%Bc;*Xlx zj0ljH-8oxK2Q6#8GLohi&D#y>46*KCSa$r<%{#zRC zy30XQilOFEM`G5&K_4?|u-}V_0u?pbE!QV==ohNEL9zVh)j`(V;#;Al3-Sl%Tll7G z0jVc`_)99d0phr9qgQmltp)g7V4Hvm$}c70~yx-hU& zF_-IXSMs3q9dHr3JKPVxn=TAALrLea{#_8$9IVO@=T#6a&oR*ZL3?g04&i1;cj97( z7Z34TDRp zHd28I4>*=S6Mj;Uc|GFCG%&a^uvR%*>0vi+xhm$8P%ML3B82SRBt<3Oixrr`U)pd8 zduA4xN?@$SYzN-Ai@!ywU$vGaS^TCzu{_iHYq2Ea=bW6JuJ7vPnfMM@xXwz){ewae@RYN>oW>Ep+bz~Q-Z48;y@z>3y6|&n zZJI^cWdA9Pi5_rRfF~Jmwr@G5Es)<7(?6>gpva$_GDrqe`CbGpe^-&Ex@9xiU!qm# z<(;ZpYhRnc1}_~IBHd1n>;9`#1;;U+DHi@4Gu)&e+YWz;5mnoy+8mPGN-zK?P*GJ* zECiw(GI7F`h-5GF7a@09lF&(l8wbpH^S_DN&tNB-bj$-as68u;(>av-C6%rhi z&9CwzC{FL22WcUZteA0r2y^Vup&akmCCeA5qkvw%`*S9P_{^bEWX(aIy3|{t#Xb<6 z{O9DpvNVUb77p@s**uU2Te*0UC+|fGR!p^5=HDFv=bM&Oh_pI6BRLsFqQKx}>8`L=;kh8mwaI!3nrwFP;Lg~N?7Y6MupuW7k14?=siKfANZ5$-! zw|$jjS>N)Y;02A&kZwO0JvfS4FcM~z6?<^PNL~??b@%N9yc<^Bp>Qa+Iw*$LxK=3F zK>Gk!wo@vsm>B+p3$(c30?961JgBCZtj!_aj5h~ZFR@Fc!G8SLAX+waD2_k<;Gl<5 zG*}UfgPfPx0H^;U`XJ{ewjwAkt%Dl#%V7sG9OQhkv=tK4IkR&n?1c}>d z2w~|+maEatGXXy#3eAHmURbb8I8BB#78VwMw8`&Mi-J{I#SkU!4~13ukmz-S09k6W zOg_h;_qX5Kr^wO-PGxn9C}~;}@!7*17~I}g%_KNaRxV+vEbS=R{zUkJx4Ooz+Ht_3 zSaBJ%W$OsCVnUdSsw(EP;p`72vb8i?b1_b@M!0%C@@0R8YgPt5Ts$9K%{N7aYxY-x z*XRXzShI#;N)8Wfw3gZ{DR(37m`u3u5r&!+XMO zc}QFqxK{A05+iyua5<~lo#pQ%eUfJFpXeG(gWXvp>d-G?UO0i#cxUb3lfzG11}%A$ zK-ZrF;T?g&gS>{TQ7ZDfa$DUfzLs!>7Gmtr)I&nD~W`yd>g+UsBz8kMv1Jrj?4^mH21eXN2@AS}> zk2AB=6h|=;;shU7OfoX}0QJtY0vfD@^FPV}sK34#0h9JFIY6qOoTUlE&x`)m1>$n3 zg`;88*1rtFpXJmmurx#Xb*+P=?zaK9R)Y@Ey1w>91ZDY!XfEvku=Swx8pZPZ&tIEu zj2q%25od@q;vw}I_;tvM%14|qR?q@M?iU;&Ri7Hy0^w&)9-OdM2q?~2z`tlAgh)MF zjvXA;n*+kgr0j!oNDHOGDn}iZLv9&hYj25YpnfUFA?yt!Phf$A7!U5nKyegI`tIYu zY&C^ICLnn=|Gfw#uTG^oJ2`cK=lE7!n_HnmvFyF{kIKGd!_*M)-j!|(!OVhQEoak- z7u7pn8?zhBb@W-So(g>wmAh7~m?bpP+yByqU^~ctX`~v&aUi|}*@a?KJ^0~d@5S<2 
zCljE@1Ep~=aC!c_@;5US4m}^V!Bx8c2_t6Q`W3K-Q&m7UU1zvRJ@!5RisQBmsovxw zIG^)WQYaQdgF6Lg3a%1)GXti5Mkh% z{@)C58h}u;k!Tlx2@I?eWO_0*Q&j@!*fljZ?a(_vp3$=D;7OYh%v z0RkmuAn&uVJXSZ$Fak7&_?;^8yK<51eH2pDaKQM@UyQ3ft8x;VWhZW?BiSEb6?o;_ zoBCc^CS{J)9$W*j?lZ8J(&V(im-PErkg|Q53JlJ%4VvbbmJ^q(y6=U{Y`zGoc=2BZ z1Y}a`^7C8hiA(29syw>VZnee;FmcfkVUjcl}=;X+(pI*D*#`R#xDFG?>?(=h7-dq$qu& zTk33k@j*N5{{8#GK|pELl4XZmXt8A5=bt^J1)1f)BK=nGJS{frqjggnEJ44+@NHN#qnizxDR=*mQ!k8n|Y5dlCnVwdTbtgcQ&_j@x$%6*9;5{@=U6Kv?s>jTg=z! zqfk}fB41va_=hSunB%6p@YNBAI$Eu?o4|DkN)9?#T<%{SMnkf0HxtRBWfalpyO3_T ziH!~wS%w=3qCD4rxX$+%fP|(QPAySVJJI)=m)9;ii_we7NKKGp1^2VeB7dN5K3Nz=&#nmqD(N>w zjc$bb>`cYUEn`UA+d$zV$!j(>6h|^(}IDJ2Y{6l*i2p>G%7z%XM{(exy-*5 zM!7HP{Il8=SVB^gkb0cc&+kR)86kPV^f(Ilf?x+EKa1KWTU)6e_jl64Hw*ufPznsr zTX9*>twb1wBpCnYgy>-t;0S1GD3d6eE59R@7wniCJTcqC@wLC>7B}kTosStbSavC* zn;Y~3WeQP38ul?4jv?>sbv98nnLmjieK7Np`t9{Bl}xwuLcLNgQB2{EGA zpm_NI_QM}!4sIoq)X)*zKeA7NI4)cRTNrrxkG#~NHGzwehdK~3Z}~U*iC+eq3%rb; zRG|<3vfJl^-Q@n2Z%zp&c{ucL>PN9U+;x6pa?Hj~pH#5tP+yvg2_0NC- zF+DtNsLFFj5_lFktg^pg|`=xJ( z7H~t}7f#ScoxY`_tjr4PQ^1jXZ7mw3eRe=VrLC_>ww#%swA)GrUfpIsl;-Py7FK1U ziWy=S1W`}m;XmJxXOtpp{5D7pHT7)6{=-pI6(~gBq*PUi8wKM! 
z_lrL>akz}vM{q?0afjkJAPyN1aYhO9Pb+Sy9n7P@WP( zpsR2~DWFIV?!;eBetfq~P879l2m2N$pwAQLs~Rg%4$8AAC8~G6{^Uw|QjUhjJpaf9 zZ2IvF?|}Huo;^GMz@zptxmyA7-=JXrF`d(A`KtFLu)8MS3&o&HmEt^F^p4S-BViVwmix8LDz+U;*A?&sCU8_r83oAR*PPq&i zQ18^veqdo;O46TX*Zaj430=8P-1MWEr{79)URSn* zNDg(ry?Nyp!lz)D3qm*$e@=V#*0lH4qtKN{4}QOEkmxAK z`U56Ebpa-oAeLa~%+SZ-A;Kk_3tF6%&=F>$Dv~+873VBHvUmvJAE?AxT;W>On_aNG zsV%2_9C|}s9r#cMWc>=69jdn9?cQE22YEYSP3-m_k&aRFepcoo@S2PApK|-!pA9I%odx$u*;VjIn z8y^0RVy;h}ww)yGDRob<$de^>(dSStG&gjrk~rch<^+f_P9%`Sc~vr)EcE#3A{1yy zA-Kluvf4%ngk#F-7}jw2Ya`Qfb77SYcH%NPVaV0%9bEv%+K zv*DPaK7?dLB;UIXyL=?R>x8sko9cr6HppTyXfBg%wxz1}9R4+PakBTxC46F_ zbib*Ji4|w1dsa-<8S2T&Z98zzDt>Y2@wFpG@b4H&D^8G;WNPe8>D}K!XIc5IJgvaw z8l&TDVLn~A@Q0y#a{67*k4G`D&;FdLby%N2YcIQ>pWyf76^cAzooS+=(cA*ug)dwn z&Q7Ep3iS$zbEU|bAdO9wQOFE)wUA}S|}I=U@}QmzMKM!B1kkp`B@g2Hoe z^Ox}JUEucruFc<%!)$b?ZlcbwRcH_{jQN2GCRLFtPA}<$6X~%*sR$n6rg{)r6;% z!{;`-Fh0??*vi6naegl}0h0CVMmh>ji!BWFl{aii5dz3wyr}v)tr24+ z*b+}$Vij}3o+I3EXDVi+@eSiG~fF%R;N>7hJFYXU&{Vu zdvk=+H-zt0b`aF|y7?(`S2;(iF8?l`li|v>)wE-*_={8&LFYS<9*nb^od|ED9uWa@lA=`8_TO+00>u$CoA7_yYQ?abe z13x(`6d6`uE<9<{N+Zeky!(T4##V_$@!rohgdCk~bkoDP6r8sB5`K26(~IMzPI>oQ z=k4%2Pq)Z0G;EEwW?2pPtx0AQ_W9P`;UcG8*spwA&Q4ZR|Fa~8Meb~)Oy+y$G@Q9Q zYLY+NsLLcg3z3q53{hrVqnuuMEA=p>79`@#WQe!q%P32nv{z)7f@euBdRK*GR^rj* zlSj^yj@hBuwGuVSv|H3=JGGSfpN~kVm%uSCwmZpKOm{t=I0-|p!c^-7?GN+kj1=t;RjAG{ zmYu}@069VGz&hVX`XV5=`Sn4(T3iY`hkA+R5w3&FQ=Qg@u!>cZF-< z%P+$mNIhC(x5b}l-BHGC#vyWhX*DnNV_C!ZJTIQ=BZM}xeffDyE#S43uHfk}FF}EL ze>+7wgGu>exlpM7DR-H&wXz%7*5*wQER1ltYD0%`uGn_q)Ev{f4A?x}Ie-ad3HjrW z?J*W~;8TNHbmu4};%Sy&HsYjDM9vXDPtdw86yrC|-819}G#RHh^{V>C3hWV>)C+M! 
z+Q}=j)9?*6z3ds?IGN$%ted@;toJE1+g&dY37^U{5mXN-8CBPQ0pVx)vk-FD(`s3M zxF})x{EUX@2E!DqQ-QC7G0Tc!Na9O>%Bcof!lsyh0kXX+cUaKxWI^t{<*LktP2urV z`1|#vbz^l_>_N2?ejKzHc94vhIPA}NUI~W-Pf9y;`<=&r*~b1FzCHCh_G0i>=Lc=f zQHQIa<>{MUE!0PyLY921eRrE;2U0^^m#Sx1uML*qe@0O(FAn4P6qzyY%y6mR-8qDB zxp%MrD!B%)e*-aDj#d^ac?tL49#=S zxA6B!N}mpsPg5)2oY$CpYW_ZXZfb`Rs-bwfpOpT|;%&Koti8$ja?>m3Mi~Mwk8t0N z#*)`V`Z2ronNh2Gv{A;)gr^{gYSH@bS>2-dx9<--`0rLH6!>^z{QDa!7Z0CdzP`5! zqmS2x`19T%d>k=D51i3?ZfuwJ*d_Hq3T$|DEP#)I-XzgAdfPj8VejXwA8kEe+^j#R zy{7iI;vJsb&n5((I{k!Q%12CY`--g6!uln}yU+3;L};U{_NC7Z^2RlJ)%f8p zhIC7H1dM0nr>dG&=9>nG8mC(HN(8W16Lu$?@#C*At)BNCl|?G_*C5d91DZkeGE1H&uhA?(MpaW*;Ic3!F1N zCODgAGcqrXU$8IK%u8CjZvsn*T9nB^O;V3E%&g_b4BXZG~?y9F%!X!xh!YK zBwWa-cb>r(2P}c*&$N~9t-2rPJN8;f$Li22ci$S7qc&^L?B^S?O8{UTuEa?3$>MB} z2z3o!5`;YKk@jrlqr)HriVl6?+;LbaIAsm{E=N89wRa9?Q^P0jaee=K^0s^^YR@oa zm_77^?v{+rjWG%byz1%q4{V~Fj$k}Ht5wVl;kM@;P2Yc{SL6s4$)(wD zJIs|&cfT`~6@J3+ zI|Q&}+AAN$lv^e}?8#-n{czX-eY-FJ?iI&Pn~kt!pYeF{hfv7qr?#CV%>X|)y;UA` z9N;?i1x$uYZ{n}7COSSNP@!Hd>6fY{a9_In*lR1l`vcRslGQ-Bu8T=~sqKLIC96|) zpWCsdl#|NcwQ?`H{PZJUtK12QCWxp@A02NTERaWs)OvC^3+y#ocPZw0hePcli`oo> z<97Zyd}Y;alrKlhw_C?ANa45SMpLhyR=YEL^1a3Tv=%WPioQh09_?8~gy`H;7M}*( zUd|GFjY;9WlyGo}S4R?eZjE4=SEWlqOSDFf?sC2nGV7CQN7I%%%f8bcIsBP$--f%n zMe*Z5q@6;T@#WUN+jDN)66%P`fHv7K+y3*@-13ZU1=q;uHu`!h>b~nbi^n04R+5aJ z!%o{jYrx-~huV>rJOk%+b^2nM9s4@`f9XK3*h^;xLGMXt=3JQ*VmkaZOG+0&78?uF8sdLDXc_apCBfPYkIwkv+?W!B!UkmT@=2gC&Flw$%~tOMoFTY3#%9<2{Nx+?uFh|xEXmQ(t{!obw- zM0@poocLix#U0s(9O*ZDn$l@yDSHSEJGj=)=GhON;%_-At}0nHzoiM>>!<>5wH2*D zetzAbKKm*4RC;VrCZj{1>0&_JoHx~YK+T-cj=8+-WBRl@9=crxKLoPcpp;V!4jW`l zPZkPU?BlwA-+RqQLjCIRMXsQ0>EQVkG%t{i?^J(Gnq(s2b7X|I!8+em5d8_6%5`0` z43i9rkgofYJLFTO{UN&IrN}(>%O_F^ssX`JiSTi=Wa|3}e37;8s8-ja{2;ckWMSGV z$yY~c{#atNQn}q6sNC64(`*UbA>T!>DdshD#&HgAjEBq`CwQMtU>vX4`*@=|WY5HX zEuD~iPr=;LJs(IKL?^hei#!y0KQj{`T%ou70d?-nxUDx8{|KxRpf zMj1OOg|~!%^~1S(np8XMi#;`VA?e4VFSTy?c1UYgr??&6e@q?$;f#p7LzQ2$WTz&c!dUHM#ztQoUDc_+?{^JMR!N~x@;Gjkht!a_tPg&d9-tn+4@?m1`7Nv(U 
zZ>USvHGjZ*OQ^)@upE+rs~Eq3v+5aP(fX_`jyFDLiB>mQK43Cxr(Mb8>3&-5obuCH z!JdYB-o#<90)IQhlAG1fp3D1md9LxuX$efK>v@2l23KJ7Vj(sG;c?_di11{?^ZK{X zoB)t8J$rwnBOwznZ{nG1&}`|m8bo-T0>^ zY>fJlQ+N$t``Ih`^0+#DSxy+c1r0ri06(}O%_Wn`y%Ckg>&a>eZiYb9fDpTP(X-_+X~e<)QTaXK+v~9MNeteE?r1kSwHd(aU-k8aRG# z70zOsMs2*W^XyQUo!}(g=5If;M|#OzHb zXd!AwcGtVkj~q*?A-2vqj7vGyd#!h(>U8+G>U5;sV=Yyt%Y?97ZDM#NoA)PK{X6?& z!4?L^t4yV~&K#dvIuus@YT7K~ruwAn)vEVs`mvogHPZGCK?HySWHCL@!^WffIHW>x zIqK$xONR@UvW%G8S+3PRNp7@* z!vCia}?Xf2VAiayE8h!V6&#ll~J@mUMWXSvQai%aICd3-yTUbI@O(uPjI)> zE7v8}tJFdBx)A(4yZrd3Y)hNBJOU>+9(GMmg)NoUxj#G=ulG$NZ;)z_lJgFsprLc8 zJCl2a)HXcc4utPqh$=mBD$ZBITl9^`R8W2#0EqSQ+j~$p14YAJgW?YWgD}+^N9e(g z%jXh7s{A@OpjP|p)hl(GjXMB$tCKLFqRe3G?|F9H=JVqtwVNHt7PxkxQstJ31poU{ zBz@pul-Hi^+%vXG*7$L($Iox5ITFdogt2h;sJ`BkydKkXwMP$oTELx87TlGtP5GzK ztNP(b2|KxG$7JiKzKBkVhK`pFm51Gn7}D*F4pIEB{uQ@XgGMax&XN8&SBLY{r#M*< z(Emv2InX2kI|AAAWZe9FpGeV408Z#2_kC^9musBo?7L_X!Yl~t-4m}luaVfCe{$*@ z4!<|GV`O))v4yWiF*MR1y}vgqU(@I~p}0d*e#5L(Y~)3GrOhj!F(dO$vC$@6kbHno zoW%Z$k|*rRSuGN^8vtT$V%Jxmu=MFEUdS1XLSEosPKd5tinZr(wv$A*kEh#qDRMN1 z$UMiEr22~Y$=nB`-mEM*84Ka0nL%;5GiCg%+iZ8HTB=ew2dJq#3Gx82%ACdYx!hIU zdH)k)cJsfvtrwW$Q39@D;>S`A{h5LK#mD09Zb_!w3u2xcW{!dIRr)U3Hru8nql2b) zRT@G9ZsZ)^UkGYi1e57 zaTBhWZ3ImZe1r}eU6X{d$JG0rQXcCWKEc(f6nG*UnV*!b%u=gFoBIkl#imf$VgXYY%h)AwJPPPsm!Wt-GK_81`mvP2wK zge4@mECrXxT`@;tOPJ$2VJZ%$dtcl~PnHU~w=mmaVzb%0rn8voIZkO;_$)kj6&3;|axdCmHg&uvA`QpI5aY~^`@6KEfnaQL8U)bS-S@e#g zpYX~s+oecqX+qV$q(?qsu_2_jkLwxjn(ildxqYtW{8w^_YV-aO;oT8Ny~+~ljtkpi z7sTszglGWleEkn*J$R0mf=lger7X_}nvR4eJMnef$P4WGb0dh6k8p>>>(_cfFH zq_1CIt?tmTaGCYh(@7UNpLVlOQ;U&9%{(2?ddbT$jDK_PX<17D+F;dpgnnT?0QBu# zeD15}zrAQ?X}w@D+GzMaF6-FVK#`JKphf*+XE0OmbJo?gGr0!52pWbECj2ml%;C1!W;9}tm1nrpom1T5P4$=kowX1aVf-g*;cU>w1%Uchv{X=}63P17aaz;6 zAmRjgPc>!bqe^wu^I2YoI-sS_OEONWrTT~xZg-~Zsscxlz*d8HO4N1~(MeE1^~pq+^{3lYJs02@-1GWP z0TvAjLF2D*Ki&T65FKQrfnFTHuB*%DyMH4W0s>(+RGq(&S*baeua!_YQb|&Fnkqd~luXl_&zWG0+rR8pE$EBq+GhF10jNgRBt+t(_Ke#%{0l 
z0zlnyzpn;A)jiRjf42^RXmtQWswIZseR0SW;9}adK~o;FMUshMM?fp>OQ_gL8EBcc z>;>&&06jGT@N+@l8%v{v4mpC$eE<$dIMkAZVjP&8UCb^)D;z-_1={csLVdh(Mk3a| z0+sE%8q%QabR04Nh>|1UzR-_>0|tKv3WGs%bN}{m8-0kLrwkD@FV$stCL1}C*<{PW zd!T+H(a)aOM-L~whVfle||el+cv1>l_=gJSglYAb*bhhWRs zzo{hnI zHQrsPDyR_XRewXkEg>xv92F-%hbBVzai^q7#A*IrO>W?@#_~|LvtvN#B76_OH%ch6 z2L)1Hcz5tkrmpd1gh2U&P9E=G%Vb}jj}JOQ$LaSLnd1Ol^$Bz?FX#ifoSr`g?fg%& zeCKJkm5`2jv5ppFZk~r}H(!}`7*u-|zuc&!m(wekx(HBU!OmAiK`~lOU8cu`?=O(a zTfDUbK$#RUQeuq$+oan|NJ8tb<6sSv0P3?i0Hy;lp3iiqz3L)dC~B2`-;iPDtrbK# zhfMw5Wb26fX*w(b*#*a&aSpv21n|whr5eOdA~p)x6ZcspekWbebsRFdNra3#Pq(*c z1ALf@bQ4%(qKc7WjbHV)t4_8acUr@vke4F>;3cXHhu46uFdGLNZujkdkQR%Qpjk5~ za$n;n!f$7)9rofpC}?*YCdRL*!7GA{KZ0Nhv?mw%4P6)YT&|^T05H?+Wb-Q!OSCwO z5ji!ABvfm$o;O8_pLP?B+#Z5SBk?|t&U>2+?L&rk<*A_2%FJx$>)VaFKKI#cpqI0u zAn#?_?rau7pxdPjoha});#fbAxM*i#X-Vu!20Bw{uLiQ?r$W6Sg$Sed;BNwhK$AFO zN*L)lOG(dhjK9{;;trgGmhJMFSC^cB+`KRcR91~wO%T+-Z@v;SlM@gWytS{%JlCi& zAAQlPyUB0oGu0mfgS+n`tk1^6U@3q%w5LaXyD}WcAyeUp+XYHWgpz|(XjWO|M{Lc_ z(oAh3b_2(gIcRdDsDsnr2qryl2QtN3^=6kp>^hzPNh{FvVP9@yKh-^4f4yAv_CjzX zR~iVz&YcFRyw+W14>E`2|1RzON9CJGX#8W&l9!HG7p{vm1^~69rs4*Yt zMyv;4r;%d=6R6pn28fM9eVatwHBiaSLLP6s&I46LG{Df{SoGZqv0Wclc; zILziRQ2hg#8K==r8_-tU;>#APM+MrX5on)s@m%eZXD+qmECNPl%b*8VYHz+Mv6MTg zM6=jR+Yw&nG^J7?3qTX>OZM^=(@EanzI{W3Zf%5`L(JX)lsD#GsJrfp-Q`& z&`DwBj;f;KX8@cAkkIU^P;-F305*mLw6>Xn->{fRp!0mN%Cl%&>S&jl{X8&IrFwF2 z#nrDGW+$Jz0{TvYXBe3#Ru0E0IVJV1)=v2!1eZKYANk2@|>$54N62a&+#2| zhl+^S_rLc(|Lea}ab(nMbN(mZhZhyZ~DbV6K$_ zKUgU&pO}ZfGgT!9xSYOp_2d+#@Cpt# zR5~aR+S!bvt36i6iCq)8=4&a6p#wEYB)B#J%&IB!NYtTjfIhus)4vRy+A(T#5U{un z+R~o5nD4Jc<^cSgef35!aC?5cKhuf-jR)~{FTpVUWap2F#R6yvoimW{xX?NEt2H1p z;&B9M`1yDqFV&cHmn_@i-pY_^c=$P(uRU=IPxqW(CTM|J!`nPTV(Vf3~ccna`3Gu;glj!zFW0nbC< zS17xw^%Wr33+;(|3n0|TuS2Ac_W%G!K7h^`I0fLJ`P7pVA|r_|S0mH7aufJ=zrBT0 zAT)Mqk7a->wF8K5!2Zdw@UE^d0w_INT3G1wsePISf7F49Zf;M|2c6#l@cChs6s1et z5g_Cez$@SzfUJORY{|^tb1lSafvlMJ@=aF#I6j+H9=2Hao4sz{=9`pIeRWtP!^Br;r5wpw@uc+1PHZEFmqs)&sNLAYMO- 
zo2{ia9Jw}rwP}@cu5dUoRCQHMDiB8+^EUXhHcsvT(EEM=t`Mj7#s4n@#AViY=7#Xx zI32_qIPCf>+!bL9w}CrZI;#F?Rd$8DJ6S_$JQL@aQvN}s4Tn1ejS@hkJp_U9T$h}s zI|2?g*&<+$>%EYhc8IH+o{ec!iJk$7XgwEbK%vdN`a;9V>WdS?)sr^p`^%1qR6=cMeacsGy@fx{rcqJU^zA&!;^ zJCC2Iw_Jyq&38Ir#&xj)53rl7a{Xf&udnT4>FB;bsg5NA=I&^*vCmDia&!bh4^Sy1 zZS4>c182+C10Vst`%g!sC${S916@y)=MJ-sHz&B$E^)IW>yPY3SC z+MAHeFDdN^=#MfK4j3e0r{AF~?cz=|a;qJu+*VEed4NDd#`z{NX1x=5vY`=O-IxYY zowb?P+SiWyV^T=~%ryE@**F0dR)FP% z0Hn$^)vyC>{`vI|n4PsX%|!m8ru}RY*S7U<>DGphY^c_mbv@doTBrX&wE#D~>No!_ z)sk3s;hTje`73ulH9Q98y5Z#fV<%YHUoE~?ccNT>u%z2;QmAKw4M$91l(06{}sSpUwjjA*W>>6 zgkZC@{>r#t3bYQNzXZB}kOcimO)vaNM*qtaR@rr5v>Cb*;;VxC71&*0eG_(9jqZOK za!Ct6dU* z&5g_V2;kS~%6jelXM_E#(kKb>_2vJ3WdS_Ff0?pKZ`63#qm*B&EaLxLHGbp&IyuU2 z)bBT<&5eNJ&%X-;u1$V?d$D>=zv&B>)qo9X-1uB(=_R>A)AxU%q*5 zB98gdmBHxY6_2A{9kz*Il_dF;_47kr&_Rs*&$wWqT}mQI=`nTw*h3yS5LcV5L%=A55#ozZ}&Z|u4CrsIKOF%)kjsR%hEd+G)i!U1&oYH7g(5Z1`lg4LfQ$l z{N{Wkv*?0w>U^s3n%;bx^R`@0F z)n!|hDy|&8j8=9-Ok8z@888k+V11IOJ1gzjy)DA6`$@xm<|jvXpnL2;vS#n6@h%kL zfrw!gv$8aI+=&rP@|6;GoFrI1a=CkZb~2hJe3`n3p9%u67~o*ZE6|E>vgnL(Ya?6V z^P$FRGG6zUM)-1?DEWOeoM4NS-3uD;8Xo5v;(SHAk8F`);C>|Lf3^~T1igGeX$4() zxJzh64$KvKS`XKAt2_lnTYloaA8+o}kQGgw4|>@QSBZN)&XOEwmLW_nI;R#Xstmj6 z{gjB07s=1Yr&E#q)VL%ELCL^F>V&COTR z@9{r7L&{BNx)8_@)4B`UHvR-BQzD<>JhBZyi;_3((5=JaxCa9GdH5NOY?nb2F zRWe{a7>b>jt4)ZwW|tkPBFs-M6*4W?q+#=d8trtJ^HHhiW(w7DSCY%B1?Cn&$q$qq zC!`mu43a%+mZIkI%^{ys5-EJD&}`f{4tjoi)qzZR*nutW-Z5N4=8j(5`hYD@GR!pK z2OjV;eHK_YZdm&KRz6XMQR!(A<3(#$F)8!Q9o~_Kh-Eb#bMh67OLFabU{s-g2+q*^ zPB3roO`EhW^gI2FXWMjd!EzF2wWJJ}#V)GQ(cj%yE#<4oGr|Zy5fQI+y)WZ^UXug^ z_a_zxfXSGfIegw`tpemMFv?Ls!GWKemp7^xsQ|taf;9Oc{sq=>y=6%ihk& z$1qc85j%|&6>YaxmNp&Ki;6~bFw@`N4=mB_-|@kmq_y~*&pYU@;;PCD9#;xArif0f zCzB-LBIzG@X$#AQff>*wwQ+2SSiK<#_t5=dbcMVX^Ii=aio>+@B)%NaF}$w8d;>VQ zGri{^-1BLT$ka;FY13Q1?C*Ec82*9A@S9dO-jAholYE4m70%y~YHl~zS<>oZU%+LCc4kPBq=r~hd+&h z1{`aEF|o(%1a`|5<(*^?AJ5%3qR9TvQI5z7PPMO-28XsEvq{?q@LFvKseclGsuB6y zA^N?xCxQ1iP8g=~ew=VkrPEUGY>HTTy!KaljLwyskKA8!6en(v8_%`{XU_YSteBiUZIR&zpuuzdXJBWR3`dXn 
zhDATs6C0co<8MDqiNNh{!e|~96mgZiN3${#fR!1lmQOkDkd|KGgSnOOm|wHRF)y?= z7UH3(3U&LC#mdOA_a(@;Dnj9sHS+vxYQ2t+3W)n)44|e*c7Be<*4U?g%7Ytq7JZ&C zthGWh-9>B!!_852+jHgqDPCqo>`m?e2zog>F>-PC2 zRE6hfvwTpGCc#Pou$yP`d-sw@DRe|{?hHC(mW_2EV6nF_RO9J1PW$^A&<|u9NZi>5 zNMGHLw>cpfqlxdGVLL)2%4d9p&1amJXQ1ro_N5l`NHQvMOVS^VUcMtyz2K&NVJ{=U z((*m@a}EZ|K7c@$hZi8wrxASko8WIg{Edgd_2O^)@VA}(+a3Jv7XLpC57OC4XTY`m zry_u)vWA;<2rSHJE;Q2GdLPUHM3md;2gHiDG4ufuQsr21H4vu_1JSRk*f$Mu5kIqr zmjUh$4$CF_DwjxDGUXE32LyqKfN*P^b?@(ybPMd6^1S!y7@iz>T218nCIF5EAC28L z?3WaQE}cAn;W?18zuwF3+kS?7uh^Cu&^6=*mMx4j0zizX5^SzRwE>&Ia$e-8KI5Cb zvFLo^;NF*NeiR7aPpKG)`fhgblV#xk8bzm?!X-PB`l_pf*nd}pd#|z|h56Y9c1ALv z(()ov`7&^Z55SJ8)mj!hgQ6*1Vi)NB_tFo*8TmVu-ksPo)AoYx`EPU#ytGxd;>~sP z+b12Rje9DHgZFH?F9y;rmS{-@QBL6`&l^`j#r3m?8@TPb>dedk|8rBeG@IM0%I+`fIA z1NgaZRrzqi+{mb{G~23bQpWk~yTFo?5??X9+eyQw2P7TpmFTcXuB(1Ih~1}IyH^q= z^)4Vw2z5OCF^@F`>UYS}fTG41#GC7x({Mx)Me+3U@kxl6aq0VN_abgS-Z&RpLVD95 z+#D`saN4Es0*B{7=;-+P=XdY?<_Y*r^CAzs&H`9R4kS?8@yk@@0ll-+OF*ML0*PEf zG{&DJX?i{N?9EL%$}rX0Q^bgxuAP?c@$sVswV7)SkeQzygtJrBMuAl7stecX=%~r( zrl#q^1G+Mn*L#XR)#E&eqUGklSdCp9o1f^Mn=0SqFwv2pSu*vWDGoS2euO}b=epRT%e#I}%&2Byx7WgjJ zK-Q8ozRMH|k19!mVo|wgn>muhti)^1vb(!`%*)$5^D%L(xJ|{E!0U&f?-7~`#NXTL ze9P&uWeLY4>>N7KN|sXD`>IE9+Eh&Te(fdeAu))Zx_T9})?<+)W`zj5;U;yixRs+H zy}F8!nTBasC7f$dVGz4nSg!HdsrCwUlnB3rdlyxSq1^c2P0JcR$(K9s>##3i2}!A( z$TTF9(^Um?@IsQa2c&)8-`VM4Qx`SueW*St4B*dSv zGf0P!&qdLpZeLPu-;2Fu&%|;RTlVs~@37yY6KWiLNh6Sn zue$_Vf7Be&q{=hF1aqt6K-%`czgrN@#NTc3?W3CdFc}7KBv-(j8J%x6aGY6)mDe}o z=xd7z?z&wDpVXitG(y{-1~xP_#0u!e7f&rO5eU(;ZqVB$gBY?~DLQVlZw`Sr0N#XLB^R19K{%EMH_y$M@@9SYMV^H5=-^ROa8 ztG})2S!G{2+=_C;IEFV@*WfX?6tqreDZ7f3-N=Sb$8?<7tcM|;=1TgsTRj->Y$#Fi z0sgS{y;UV#CiSpsA4tuiSE3*EY2Rjul?R zdm`c1i~~E!sW?pC#-YCo4pWoByo1-x+gAnSJ09L}^2bL_v;=ct^N&={Fr|7aaJ6@$7+xVz8<` znYlZ<&k@jwlDld__>1~31?BK3J<5rdaKTqHoxLDkIJ2rwX!I8Rot+1+nH4y;C)^}! 
zgYRYSv9oU;ws3E`vXbRZnha^}R4)>}kM}sUH1E;NhV22^oZ<94Wv-MY&B}#h?;rND zqj-TXYu`E;-D?)v5P!~lBr!x`6!kp8B=)pT!xNvewoD72n&n;sjGhc5oZEsW2mMA@ zH5?FAjTfBnDZKpw-u>cmsH#=m|0vWU|Xb3cacaieAi&-Y=J-mnx_W^UcN z?VcZ=zE|$sQ7jAJ!Nc?@raNO#9Vl{-KE2;7DCtnX@+nHfYT0GDAudj$Ucl5L*Rh}- z945eg?Ci~{(};{7Wc9DcbUYD-?|P)-D<*=VroT;#=pj#ad09K2rDLIiFP^+WEe9EntteCpBL4p z5^7wogG7>+bz-GCnpg-o9>ZUk#LD_C65Ey*7lVmKfvoIDP=SM;qdn7|_FF45xUdy7 zFn0nOmhXR~Ed+mKp`Z{+DdyDa{0xa6Rj|nU5p7x1h|&>||sp(honm?;yLUzv%rs)hYg-EdR9nSXnQ5 zx}VFe^wxs0gfCletEfF5QZ(~Xn5!e#rm;Q0qo5Pm11Ryn(;n0`yZn~m#)sH!LU~Ky zFu5xOitiBx33zA8>{N5RH#9WZ>Ud2@;mdqhZGAE>bV%w`U! z_)W*|G(rSTv&ZFVZ|Mge$S8nyX@tsxapvgpT+aD7gDb*`)+^lt;j&t2AA$ar3u@CJ zgbY&7mb#C%##i?>9Gm?7%D@`~>zsdqj&Ce>E@t`)#27rG{h&Mg#jq*%%%Vrt)9lJZ zJ<{Y2fA%>>+K_bWah^7g9<2C?XlN|hI>dx_3PadMNS|*4>05W~f@&6GiH(QQi&_pk zJIIH?q;0DGsZGDVbsO%<@tEi!E$$Ltu^p<9B~=!>k3H5Xlv?TOp8wqfWAD0iH}>sy zMCVRa_v;#damvtdO|TPW_f&z%cT=%UmG9-U3oCb3`x*}leC5(wj896?(rojH-fEv$ zoi{ikI+TtseR5JDjE?IhRr--^4y)L8u1Ds8NZ7>Q4xm}p^(mupEY-tXY!(jqy@}x2 zm*jG|w?m~Es`D;K~{^xn(&;+^?93y6DbKJDq zHq>scAb_07S1)+wgSTc#QwN-R$Cv8LmaDCtgVf+8YD5QzTb)E>k2j%2$_5A!h_hjO z3a|8hMhyy=7smjbW5g_$<4tmkL-u&8te^-bBzwT|T`?+hY?t&QmveDC;j0Vg=H^BK z>(Bxqfmox5l6;wJKW2eNl~fSBQZq}v^e?Kroc=g~Ow!RjrnvYm@&4&pMc-1Vq6ukt zB+|&dm{YGsE?2L?%zGl=1n~90FBvb)kb^+JVbtM_qVal$S2%_H_|QEX=?!@6agFDc zg+^b_TXhAHDR%gBj3PTSvvhHkhdS1p?%hTX!GByZjmNkR)xGizhMY?diPnk~ZD~z6 zypkf>@NoXy_(%QPT0V1; znq9Fc*v;A5M@FG`!3rx2jblCpZ;MY#u|1rWQXlkDtL2Wn!b`bK#x3)*lzWGw)2l7yWefWP zgc0S;pxx$&bEuz!EAmA((AWuyWV)W2WXp4 z`wDg+$y>3QfZ*!3u)gst5iV5LS070>@>cMC^JRQ=)T=+j=$b6$TVe?3uCnb+^6%z| z#BaI4KBig@%3DUAwnK9QZlbrZS(kgLAx=K8t4eX%g*tU#@iP#JfGGI1PqA|Dqt3o~ z$`A-*G~&ozNbU=q3$&ozAP3mw^b7-Icuk}}X|g+& zgk7P+=Sd_?^U{lG3iOoZeaY+r-nY4NrB#0Q`0 zz6z6k`<5w=>X|OEXmQ7azH6k^TW%XY4xe!=uIA|~E=9yLWY`kFvP^;`Fvv}RIFJvm zkLjHikx$5xuOE?59zMzptgfEO|v&cWtCRwiF13J9|>xlD4MPYK{;!iU&GRn%z&KKo&>iU+Ua#^DXkrBAt z&wXYpZ<$yolpeka4~_m{Na)WY!$K(eRBoTM;CUtrR7}z|6w3-p4jVSvVMZxXUpAD# zR7oh3;-*$RBv;ke5)d6OEpH1 
zIgjQ})FcsCFhws1t(MNTnI$rbzEPrS`X^aa*BALFoc);AeHrtFJz@LwECZny7OfVyj85 z)`p&xyMnt478@ih3;{P0^$xeUF3Jjy!1?25TTKaFLb{W3r>CyymF^3{9q^rZ#&^9Lb)55-~&M#oRhAMR7 zfMt!0-2wY{j?n?1F4vGCRm3;ksj1i}RUK9to|N`_X>aY;p0Zm@4Oex2JukC*kc2%i z5ydnKvQPt52OZsR2HLj(8DYe9M(AVa(&mWq>UpQ2L(v}mW9o{FO`rN-NZB+T6P4Vf zNSS&je52+9$IvTWBJ1FTrjNY2JB@C*c9=^c4~&D7si}`nnA6$6UPg5!run&ecLyG!A zd`v7pyWUOmrikHQF#T?#3Y9eO0}krx5?eeU-nFGx61yE$PJznq(88Q*eDz_>2)o$w zj91EYpztNIBUG%*iDviE=q_kU;^oURzOnmiO4Dy#nP03#W-EJT*~Gz59;)!DmHZZ` z?OoSbwu9?)hIvt*W%<2leYj<`v7+xxSi-THhR+&E$>xxYN#`=m$Nr)%TtOjL(#6lSBJsb3EZ{5V+g zpkltCYvrb(NKyO6u_i6xxE)uV#ujv;WhnKtA+RUo&U>5!>t0C!H)r^Sm%(q$~eHUhpo_F%&NOSx+s7bH}4kkSk*j<*z z%ReI4$ea7MJ$q&*B0oW!ru@fCnjyD!XM!~r`g5dJ92dK{n8MQ=pYmn5m|FLdK81ab zngLl$>(lfjr>p_0UQ& zjgu>8HQHd3mX}Opbih%(`F-zdB1Z+0JMv&i%@$Fg8F+~)k1c`F-j8N8M}*Sy)o04@ zC;d9oa!1-BVf2X|aoVx-3Ja9q=WJGrfheaew>HIdI<5|B8}7T~j6wJYlk0)B;JyHtU}3cgSzcE*#X2QJO?5d1n;rQ{$TyP-+a6d49fW&r zuS0|zmqt9mtBCEdkX}x4iz0ND#%<#PSW)zOFeJ?|#=Ny55{Kjrt3d&2Yjkfr!~xK* z<;SR7J=c74Tsnq&vW${&c!A7$1_Qaa8oYMDS4Cw@PpV#UB2R8@hS^(J@<_P}G9mYZ zSr(obFfl#aNY#{*T05>$GpYB(nrZcSlN)2cbZdLR%Ik$^Scl>@I^*Nx38ySdDS?q@ zuHfU{7qZhzxP1mAjFxraaKgeRh9@eI$n{&E_lr)AUDaiI)(hJ4dvMXaOe-WI&yl*@uF;n)4_FTm{b?*lw zD+ONp-|QxzO0ecG(HS`+KGtnWY1PUg4_4y~kAz{}69aFY2cguTt1h>eh^h8sCeyV} zRg+_PRE+y1Dc-iyip1}1?6Df;S9^uOe#^5?l)X5o`e?iiiYFd^~1-P#QVjDjV22B zgbek&y)*zO&4*bTyUcS`Vzc!5=B?Qk@hZjtS}OF| z;K9?vrvvJaCQXDE(X!0Y;qa<@w5h{6X}dDwNVEE2V$M!>N^6H!-!cc*WhwR0In+Ot zvnKJ=>=^9X=P#<$&S$5lj z``i5Q(5t+I`#WMw(ro-LWXX8~863EF{Om?5^Zqe_m{fH)~bwNe2+f|9uoh1WlZ3kic@9iI<0JLXEq zjspmyrQ06SN!~f|`IpA@*opL`Sh71%LUGI5KLa{^C7gMhJ-OiP(`ojHug@LTw{Gpt z&IP`MIU)rWxj7VV6a3n0`mu=XOw9>wflGg-{0^Yz*n_6yH316V=qu_k$?027E(CrB z@fy3|3Bb!eRn24k(eWYhqIIhfyz7k%J`SkzwTGOWk6L=*r>jUM`T!-qTh91WAB)#+ zZtC4PfSTwaxgwJd;C5~-w{NagpxsVNrM0kk8o9nfq?KP#l3m>9f#mn{H%p;FO`JH_uENw#|{j4xiwC7 z0|o!1oT2F!lBh8owsJH%s7#+K(T72KNQgYZ9~NohI{u@>NC$|ksgf9G|pAy zRDj=T{wikn=F^R$M_`oezK3`-@xloPtW(Rt{=+D@vTp?kD1}$Tw62ruwEG 
zYu$~td~ySJ7k{<>ZbcAYTsOPmJv?pbeP1Q~@QbUay&yxkGkq!5br$0|AhBS7EMhGA z1~GzbfEK6P5w2xi&A##Vv8mI@;bX^&m2m+SP#~w)>SY2wYC4u?6yElwwHyI%SuS>A z`$0~=`OmB1r<6mfx}nAov-ZpriDvk=HW0GuxhlLqLD5%xA5wavVbC1e?e07`+HxPL z2xN{;Oq7`5G6UQByma;f^>_jcP)#les?(r{Sv@S(c(0m@>|?0@smt(ia(d*G@|8L*~B(*5xj#pb(<=HD*?GSlmm?XL<^%mh3|^MMn($Ib#7e2ioe 0, x, np.zeros_like(x)) - + def backward(self, grad_y): """ grad_y: same shape as x """ x = self.memory['x'] - grad_x = np.where(x > 0, np.ones_like(x), np.zeros_like(x)) - + grad_x = grad_y * np.where(x > 0, np.ones_like(x), np.zeros_like(x)) + return grad_x class Log(NumpyOp): - + def forward(self, x): """ x: shape(N, c) """ - + out = np.log(x + self.epsilon) self.memory['x'] = x - + return out - + def backward(self, grad_y): """ grad_y: same shape as x """ - x=self.memory['x'] - grad_x=grad_y*np.reciprocal(x+self.epsilon) - + x = self.memory['x'] + grad_x = grad_y * np.reciprocal(x + self.epsilon) + return grad_x @@ -73,41 +73,41 @@ class Softmax(NumpyOp): """ softmax over last dimension """ - + def forward(self, x): """ x: shape(N, c) """ - exp_x=np.exp(x) - exp_sum=np.sum(exp_x,axis=1,keepdims=True) - out=exp_x/exp_sum - self.memory['x']=x - self.memory['out']=out - + exp_x = np.exp(x - x.max()) + exp_sum = np.sum(exp_x, axis=1, keepdims=True) + out = exp_x / exp_sum + self.memory['x'] = x + self.memory['out'] = out + return out - + def backward(self, grad_y): """ grad_y: same shape as x """ sm = self.memory['out'] - Jacobs = np.array([np.diag(r) -np.outer(r,r) for r in sm]) + Jacobs = np.array([np.diag(r) - np.outer(r, r) for r in sm]) grad_y = grad_y[:, np.newaxis, :] grad_x = np.matmul(grad_y, Jacobs).squeeze(axis=1) - + return grad_x class NumpyLoss: - + def __init__(self): self.target = None - + def get_loss(self, pred, target): self.target = target return (-pred * target).sum(axis=1).mean() - + def backward(self): return -self.target / self.target.shape[0] @@ -117,7 +117,7 @@ class NumpyModel: self.W1 = np.random.normal(size=(28 * 28, 256)) 
self.W2 = np.random.normal(size=(256, 64)) self.W3 = np.random.normal(size=(64, 10)) - + # 以下算子会在 forward 和 backward 中使用 self.matmul_1 = Matmul() self.relu_1 = Relu() @@ -126,7 +126,7 @@ class NumpyModel: self.matmul_3 = Matmul() self.softmax = Softmax() self.log = Log() - + # 以下变量需要在 backward 中更新。 softmax_grad, log_grad 等为算子反向传播的梯度( loss 关于算子输入的偏导) self.x1_grad, self.W1_grad = None, None self.relu_1_grad = None @@ -135,25 +135,105 @@ class NumpyModel: self.x3_grad, self.W3_grad = None, None self.softmax_grad = None self.log_grad = None - + + self.beta_1 = 0.9 + self.beta_2 = 0.999 + self.epsilon = 1e-8 + self.is_first = True + + self.W1_grad_mean = None + self.W2_grad_mean = None + self.W3_grad_mean = None + + self.W1_grad_square_mean = None + self.W2_grad_square_mean = None + self.W3_grad_square_mean = None + def forward(self, x): x = x.reshape(-1, 28 * 28) - - #################### - # code 6 # - #################### - + + x = self.relu_1.forward(self.matmul_1.forward(x, self.W1)) + x = self.relu_2.forward(self.matmul_2.forward(x, self.W2)) + x = self.matmul_3.forward(x, self.W3) + + x = self.log.forward(self.softmax.forward(x)) + return x - + def backward(self, y): - - #################### - # code 7 # - #################### - - pass - + self.log_grad = self.log.backward(y) + self.softmax_grad = self.softmax.backward(self.log_grad) + self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad) + self.relu_2_grad = self.relu_2.backward(self.x3_grad) + self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad) + self.relu_1_grad = self.relu_1.backward(self.x2_grad) + self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad) + + return self.x1_grad + def optimize(self, learning_rate): - self.W1 -= learning_rate * self.W1_grad - self.W2 -= learning_rate * self.W2_grad - self.W3 -= learning_rate * self.W3_grad + def SGD(): + self.W1 -= learning_rate * self.W1_grad + self.W2 -= learning_rate * self.W2_grad + self.W3 -= learning_rate * 
self.W3_grad + + def SGDM(): + if self.is_first: + self.is_first = False + + self.W1_grad_mean = self.W1_grad + self.W2_grad_mean = self.W2_grad + self.W3_grad_mean = self.W3_grad + else: + self.W1_grad_mean = self.beta_1 * self.W1_grad_mean + (1 - self.beta_1) * self.W1_grad + self.W2_grad_mean = self.beta_1 * self.W2_grad_mean + (1 - self.beta_1) * self.W2_grad + self.W3_grad_mean = self.beta_1 * self.W3_grad_mean + (1 - self.beta_1) * self.W3_grad + + delta_1 = learning_rate * self.W1_grad_mean + delta_2 = learning_rate * self.W2_grad_mean + delta_3 = learning_rate * self.W3_grad_mean + + self.W1 -= delta_1 + self.W2 -= delta_2 + self.W3 -= delta_3 + + def Adam(learning_rate=0.001): + if self.is_first: + self.is_first = False + self.W1_grad_mean = self.W1_grad + self.W2_grad_mean = self.W2_grad + self.W3_grad_mean = self.W3_grad + + self.W1_grad_square_mean = np.square(self.W1_grad) + self.W2_grad_square_mean = np.square(self.W2_grad) + self.W3_grad_square_mean = np.square(self.W3_grad) + + self.W1 -= learning_rate * self.W1_grad_mean + self.W2 -= learning_rate * self.W2_grad_mean + self.W3 -= learning_rate * self.W3_grad_mean + else: + self.W1_grad_mean = self.beta_1 * self.W1_grad_mean + (1 - self.beta_1) * self.W1_grad + self.W2_grad_mean = self.beta_1 * self.W2_grad_mean + (1 - self.beta_1) * self.W2_grad + self.W3_grad_mean = self.beta_1 * self.W3_grad_mean + (1 - self.beta_1) * self.W3_grad + + self.W1_grad_square_mean = self.beta_2 * self.W1_grad_square_mean + (1 - self.beta_2) * np.square( + self.W1_grad) + self.W2_grad_square_mean = self.beta_2 * self.W2_grad_square_mean + (1 - self.beta_2) * np.square( + self.W2_grad) + self.W3_grad_square_mean = self.beta_2 * self.W3_grad_square_mean + (1 - self.beta_2) * np.square( + self.W3_grad) + + delta_1 = learning_rate * self.W1_grad_mean * np.reciprocal( + np.sqrt(self.W1_grad_square_mean) + np.full_like(self.W1_grad_square_mean, self.epsilon)) + delta_2 = learning_rate * self.W2_grad_mean * np.reciprocal( + 
np.sqrt(self.W2_grad_square_mean) + np.full_like(self.W2_grad_square_mean, self.epsilon)) + delta_3 = learning_rate * self.W3_grad_mean * np.reciprocal( + np.sqrt(self.W3_grad_square_mean) + np.full_like(self.W3_grad_square_mean, self.epsilon)) + + self.W1 -= delta_1 + self.W2 -= delta_2 + self.W3 -= delta_3 + + # SGD() + # SGDM() + Adam() diff --git a/assignment-2/submission/18307130090/numpy_mnist.py b/assignment-2/submission/18307130090/numpy_mnist.py index c18db94..6d67f25 100644 --- a/assignment-2/submission/18307130090/numpy_mnist.py +++ b/assignment-2/submission/18307130090/numpy_mnist.py @@ -1,36 +1,48 @@ import numpy as np + from numpy_fnn import NumpyModel, NumpyLoss -from utils import download_mnist, batch, mini_batch, get_torch_initialization, plot_curve, one_hot +from utils import download_mnist, batch, get_torch_initialization, plot_curve, one_hot + + +def mini_batch(dataset, batch_size=128): + data = np.array([np.array(each[0]) for each in dataset]) + label = np.array([each[1] for each in dataset]) + + size = data.shape[0] + index = np.arange(size) + np.random.shuffle(index) + + return [(data[index[i:i + batch_size]], label[index[i:i + batch_size]]) for i in range(0, size, batch_size)] def numpy_run(): train_dataset, test_dataset = download_mnist() - + model = NumpyModel() numpy_loss = NumpyLoss() model.W1, model.W2, model.W3 = get_torch_initialization() - + train_loss = [] - - epoch_number = 3 + + epoch_number = 10 learning_rate = 0.1 - + for epoch in range(epoch_number): for x, y in mini_batch(train_dataset): y = one_hot(y) - - y_pred = model.forward(x.numpy()) + + y_pred = model.forward(x) loss = numpy_loss.get_loss(y_pred, y) model.backward(numpy_loss.backward()) model.optimize(learning_rate) - + train_loss.append(loss.item()) - + x, y = batch(test_dataset)[0] accuracy = np.mean((model.forward(x).argmax(axis=1) == y)) print('[{}] Accuracy: {:.4f}'.format(epoch, accuracy)) - + plot_curve(train_loss) -- Gitee From 
dca4f6118d96e8409432e1b70595166616484a7e Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Wed, 28 Apr 2021 10:01:21 +0800 Subject: [PATCH 4/8] modify README --- assignment-2/submission/18307130090/README.md | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/assignment-2/submission/18307130090/README.md b/assignment-2/submission/18307130090/README.md index b4594b3..172af6d 100644 --- a/assignment-2/submission/18307130090/README.md +++ b/assignment-2/submission/18307130090/README.md @@ -6,7 +6,7 @@ ## 简述 -在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了SGD-Momentum和Adam优化算法,并比较了它们的性能。 +在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了`Momentum`和`Adam`优化算法,并比较了它们的性能。 ## 算子的反向传播 @@ -20,12 +20,12 @@ $$ 由[神经网络与深度学习-邱锡鹏](https://nndl.github.io/nndl-book.pdf)中公式(B.20)和(B.21),有 $$ -\frac{\part Y}{\part W}=\frac{\part(X\times W)}{\part W}=X^T\\ -\frac{\part Y}{\part X}=\frac{\part(X\times W)}{\part X}=W^T +\frac{\partial Y}{\partial W}=\frac{\partial(X\times W)}{\partial W}=X^T\\\\ +\frac{\partial Y}{\partial X}=\frac{\partial(X\times W)}{\partial X}=W^T $$ 结合链式法则和矩阵运算法则,可得 $$ -\nabla_X=\nabla_Y\times W^T\\ +\nabla_X=\nabla_Y\times W^T\\\\ \nabla_W=X^T\times \nabla_Y $$ @@ -34,20 +34,20 @@ $$ `Relu`的计算公式为: $$ Y_{ij}=\begin{cases} -X_{ij}&X_{ij}\ge0\\ +X_{ij}&X_{ij}\ge0\\\\ 0&\text{otherwise} \end{cases} $$ 因此有 $$ -\frac{\part Y_{ij}}{\part X_{ij}}=\begin{cases} -1&X_{ij}>0\\ +\frac{\partial Y_{ij}}{\partial X_{ij}}=\begin{cases} +1&X_{ij}>0\\\\ 0&\text{otherwise} \end{cases} $$ 结合链式法则,可得 $$ -{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\part Y_{ij}}{\part X_{ij}} +{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} $$ ### `Log` @@ -58,11 +58,11 @@ Y_{ij}=\ln(X_{ij}+\epsilon),\epsilon=10^{-12} $$ 因此有 $$ -\frac{\part Y_{ij}}{\part X_{ij}}=\frac1{X_{ij}+\epsilon} +\frac{\partial Y_{ij}}{\partial 
X_{ij}}=\frac1{X_{ij}+\epsilon} $$ 结合链式法则,可得 $$ -{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\part Y_{ij}}{\part X_{ij}} +{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} $$ ### `Softmax` @@ -73,12 +73,12 @@ Y_{ij}=\frac{\exp\{X_{ij} \}}{\sum_{k=1}^c\exp\{X_{ik} \}} $$ 其中$Y,X$均为$N\times c$的矩阵。容易发现`Softmax`以$X$的每行作为单位进行运算。因此对于$X,Y$的行分量$X_k,Y_k$,有 $$ -\frac{\part Y_{ki}}{\part X_{kj}}=\begin{cases} -\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\ +\frac{\partial Y_{ki}}{\partial X_{kj}}=\begin{cases} +\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\\\ -\frac{\exp\{X_{ki} \}\exp\{X_{kj} \}}{(\sum_t\exp\{X_{kt}\})^2}=-Y_{ki}Y_{kj}&i\not=j \end{cases} $$ -因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\part Y_{ki}}{\part X_{kj}}$。结合链式法则,可得 +因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\partial Y_{ki}}{\partial X_{kj}}$。结合链式法则,可得 $$ \nabla_X=\nabla_Y\times J $$ @@ -220,7 +220,7 @@ $$ 类比现实世界,当小球从高处向低处滚动时,其运动方向不仅与当前位置的“陡峭程度”相关,也和当前的速度,即先前位置的“陡峭程度”相关。因此在`Momentum`算法中,参数的修正值不是取决于当前梯度,而是取决于梯度的各时刻的指数移动平均值: $$ -m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ w_{t+1}=w_t-\eta\cdot m_t $$ 指数移动平均值反映了参数调整时的“惯性”。当参数调整方向正确时,`Momentum`有助于加快训练速度,减少震荡的幅度;然而当参数调整方向错误时,`Momentum`会因为无法及时调整方向造成性能上的部分损失。 @@ -252,14 +252,14 @@ $$ 因为参数的调整值与当前梯度直接相关,因此取历史梯度的平方和作为衡量参数调整频率的标准。如果历史梯度平方和较大,表明参数被频繁更新,需要降低其学习率。因此梯度下降算法改写为: $$ -m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\ -V_t=V_{t-1}+\nabla^2f(w_t)\\ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ +V_t=V_{t-1}+\nabla^2f(w_t)\\\\ w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t $$ 然而,由于$V_t$关于$t$单调递增,可能导致训练后期学习率过低,参数无法收敛至最优。因此将$V_t$也改为指数移动平均值,避免了上述缺陷: $$ -m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\ -V_t=\beta_2\cdot V_{t-1}+(1-\beta_2)\cdot\nabla^2f(w_t)\\ +m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\\\ +V_t=\beta_2\cdot 
V_{t-1}+(1-\beta_2)\cdot\nabla^2f(w_t)\\\\ w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t $$ 使用`Adam`算法的训练结果如下: -- Gitee From 796b20eaf3db0e12f94416111db7a3d637dd4b07 Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Wed, 28 Apr 2021 10:05:18 +0800 Subject: [PATCH 5/8] modify README --- assignment-2/submission/18307130090/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/assignment-2/submission/18307130090/README.md b/assignment-2/submission/18307130090/README.md index 172af6d..e15556d 100644 --- a/assignment-2/submission/18307130090/README.md +++ b/assignment-2/submission/18307130090/README.md @@ -45,7 +45,7 @@ $$ 0&\text{otherwise} \end{cases} $$ -结合链式法则,可得 +结合链式法则,得到反向传播的计算公式: $$ {\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} $$ @@ -60,7 +60,7 @@ $$ $$ \frac{\partial Y_{ij}}{\partial X_{ij}}=\frac1{X_{ij}+\epsilon} $$ -结合链式法则,可得 +结合链式法则,得到反向传播的计算公式: $$ {\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} $$ -- Gitee From 58efa96a17cb10f5aca17f8f4177fd3f758e4f1a Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Wed, 28 Apr 2021 10:08:37 +0800 Subject: [PATCH 6/8] modify README --- assignment-2/submission/18307130090/README.md | 564 +++++++++--------- 1 file changed, 283 insertions(+), 281 deletions(-) diff --git a/assignment-2/submission/18307130090/README.md b/assignment-2/submission/18307130090/README.md index e15556d..2420cc9 100644 --- a/assignment-2/submission/18307130090/README.md +++ b/assignment-2/submission/18307130090/README.md @@ -1,282 +1,284 @@ -# PRML-2021 Assignment2 - -姓名:夏海淞 - -学号:18307130090 - -## 简述 - -在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了`Momentum`和`Adam`优化算法,并比较了它们的性能。 - -## 算子的反向传播 - -### `Matmul` - -`Matmul`的计算公式为: -$$ -Y=X\times W -$$ -其中$Y,X,W$分别为$n\times d',n\times d,d\times d'$的矩阵。 - -由[神经网络与深度学习-邱锡鹏](https://nndl.github.io/nndl-book.pdf)中公式(B.20)和(B.21),有 -$$ -\frac{\partial Y}{\partial 
W}=\frac{\partial(X\times W)}{\partial W}=X^T\\\\ -\frac{\partial Y}{\partial X}=\frac{\partial(X\times W)}{\partial X}=W^T -$$ -结合链式法则和矩阵运算法则,可得 -$$ -\nabla_X=\nabla_Y\times W^T\\\\ -\nabla_W=X^T\times \nabla_Y -$$ - -### `Relu` - -`Relu`的计算公式为: -$$ -Y_{ij}=\begin{cases} -X_{ij}&X_{ij}\ge0\\\\ -0&\text{otherwise} -\end{cases} -$$ -因此有 -$$ -\frac{\partial Y_{ij}}{\partial X_{ij}}=\begin{cases} -1&X_{ij}>0\\\\ -0&\text{otherwise} -\end{cases} -$$ -结合链式法则,得到反向传播的计算公式: -$$ -{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} -$$ - -### `Log` - -`Log`的计算公式为 -$$ -Y_{ij}=\ln(X_{ij}+\epsilon),\epsilon=10^{-12} -$$ -因此有 -$$ -\frac{\partial Y_{ij}}{\partial X_{ij}}=\frac1{X_{ij}+\epsilon} -$$ -结合链式法则,得到反向传播的计算公式: -$$ -{\nabla_X}_{ij}={\nabla_Y}_{ij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} -$$ - -### `Softmax` - -`Softmax`的计算公式为 -$$ -Y_{ij}=\frac{\exp\{X_{ij} \}}{\sum_{k=1}^c\exp\{X_{ik} \}} -$$ -其中$Y,X$均为$N\times c$的矩阵。容易发现`Softmax`以$X$的每行作为单位进行运算。因此对于$X,Y$的行分量$X_k,Y_k$,有 -$$ -\frac{\partial Y_{ki}}{\partial X_{kj}}=\begin{cases} -\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\\\ --\frac{\exp\{X_{ki} \}\exp\{X_{kj} \}}{(\sum_t\exp\{X_{kt}\})^2}=-Y_{ki}Y_{kj}&i\not=j -\end{cases} -$$ -因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\partial Y_{ki}}{\partial X_{kj}}$。结合链式法则,可得 -$$ -\nabla_X=\nabla_Y\times J -$$ -将行分量组合起来,就得到了反向传播的最终结果。 - -## 模型构建与训练 - -### 模型构建 - -#### `forward` - -参考`torch_mnist.py`中`TorchModel`方法的模型,使用如下代码构建: - -```python -def forward(self, x): - x = x.reshape(-1, 28 * 28) - - x = self.relu_1.forward(self.matmul_1.forward(x, self.W1)) - x = self.relu_2.forward(self.matmul_2.forward(x, self.W2)) - x = self.matmul_3.forward(x, self.W3) - - x = self.log.forward(self.softmax.forward(x)) - - return x -``` - -模型的计算图如下: - -![](./img/fnn_model.png) - -#### `backward` - -根据模型的计算图,按照反向的计算顺序依次调用对应算子的反向传播算法即可。 - -```python -def backward(self, y): - self.log_grad = self.log.backward(y) - 
self.softmax_grad = self.softmax.backward(self.log_grad) - self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad) - self.relu_2_grad = self.relu_2.backward(self.x3_grad) - self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad) - self.relu_1_grad = self.relu_1.backward(self.x2_grad) - self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad) - - return self.x1_grad -``` - -#### `mini_batch` - -`mini_batch`的作用是提高模型的训练速度,同时得到较好的优化效果。传统的批处理方法对整个数据集计算平均的损失函数值,随后计算相应梯度进行反向传播。当训练数据集容量较大时,对训练速度造成严重影响;而随机方法则对数据集的每个样本计算损失函数值,随后计算相应梯度进行反向传播。此时数据集容量不对训练速度产生影响,然而由于样本的随机性,可能导致参数无法收敛到最优值,在最优值附近震荡。因此一个折中的方法是将数据集划分为若干批次,在提高训练速度的同时保证了较好的收敛效果。 - -在本次实验中,我参照`utils.py`中的`mini_batch`,在`numpy_mnist.py`中重新实现了`mini_batch`方法: - -```python -def mini_batch(dataset, batch_size=128): - data = np.array([np.array(each[0]) for each in dataset]) - label = np.array([each[1] for each in dataset]) - - size = data.shape[0] - index = np.arange(size) - np.random.shuffle(index) - - return [(data[index[i:i + batch_size]], label[index[i:i + batch_size]]) for i in range(0, size, batch_size)] -``` - -### 模型训练 - -设定`learning_rate=0.1`,`batch_size=128`,`epoch_number=10`。训练结果如下: - -``` -[0] Accuracy: 0.9486 -[1] Accuracy: 0.9643 -[2] Accuracy: 0.9724 -[3] Accuracy: 0.9738 -[4] Accuracy: 0.9781 -[5] Accuracy: 0.9768 -[6] Accuracy: 0.9796 -[7] Accuracy: 0.9802 -[8] Accuracy: 0.9800 -[9] Accuracy: 0.9796 -``` - - - -尝试缩减`batch_size`的大小,设定`batch_size=64`。训练结果如下: - -``` -[0] Accuracy: 0.9597 -[1] Accuracy: 0.9715 -[2] Accuracy: 0.9739 -[3] Accuracy: 0.9771 -[4] Accuracy: 0.9775 -[5] Accuracy: 0.9803 -[6] Accuracy: 0.9808 -[7] Accuracy: 0.9805 -[8] Accuracy: 0.9805 -[9] Accuracy: 0.9716 -``` - - - -尝试降低`learning_rate`,设定`learning_rate=0.01`。训练结果如下: - -``` -[0] Accuracy: 0.8758 -[1] Accuracy: 0.9028 -[2] Accuracy: 0.9143 -[3] Accuracy: 0.9234 -[4] Accuracy: 0.9298 -[5] Accuracy: 0.9350 -[6] Accuracy: 0.9397 -[7] Accuracy: 0.9434 -[8] Accuracy: 0.9459 -[9] Accuracy: 0.9501 -``` 
- - - -根据实验结果,可以得出以下结论: - -当学习率和批处理容量合适时,参数的收敛速度随着学习率的减小而减小,而参数的震荡幅度随着批处理容量的减小而增大。 - -## 梯度下降算法的改进 - -传统的梯度下降算法可以表述为: -$$ -w_{t+1}=w_t-\eta\cdot\nabla f(w_t) -$$ -尽管梯度下降作为优化算法被广泛使用,它依然存在一些缺点,主要表现为: - -- 参数修正方向完全由当前梯度决定,导致当学习率过高时参数可能在最优点附近震荡; -- 学习率无法随着训练进度改变,导致训练前期收敛速度较慢,后期可能无法收敛。 - -针对上述缺陷,产生了许多梯度下降算法的改进算法。其中较为典型的是`Momentum`算法和`Adam`算法。 - -### `Momentum` - -针对“参数修正方向完全由当前梯度决定”的问题,`Momentum`引入了“动量”的概念。 - -类比现实世界,当小球从高处向低处滚动时,其运动方向不仅与当前位置的“陡峭程度”相关,也和当前的速度,即先前位置的“陡峭程度”相关。因此在`Momentum`算法中,参数的修正值不是取决于当前梯度,而是取决于梯度的各时刻的指数移动平均值: -$$ -m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ -w_{t+1}=w_t-\eta\cdot m_t -$$ -指数移动平均值反映了参数调整时的“惯性”。当参数调整方向正确时,`Momentum`有助于加快训练速度,减少震荡的幅度;然而当参数调整方向错误时,`Momentum`会因为无法及时调整方向造成性能上的部分损失。 - -使用`Momentum`算法的训练结果如下: - -``` -[0] Accuracy: 0.9444 -[1] Accuracy: 0.9627 -[2] Accuracy: 0.9681 -[3] Accuracy: 0.9731 -[4] Accuracy: 0.9765 -[5] Accuracy: 0.9755 -[6] Accuracy: 0.9768 -[7] Accuracy: 0.9790 -[8] Accuracy: 0.9794 -[9] Accuracy: 0.9819 -``` - - - -可以看出相较传统的梯度下降算法并无明显优势。 - -### `Adam` - -针对“学习率无法随着训练进度改变”的问题,`Adam`在`Momentum`的基础上引入了“二阶动量”的概念。 - -`Adam`的改进思路为:由于神经网络中存在大量参数,不同参数的调整频率存在差别。对于频繁更新的参数,我们希望适当降低其学习率,提高收敛概率;而对于其他参数,我们希望适当增大其学习率,加快收敛速度。同时,参数的调整频率可能发生动态改变,我们也希望学习率能够随之动态调整。 - -因为参数的调整值与当前梯度直接相关,因此取历史梯度的平方和作为衡量参数调整频率的标准。如果历史梯度平方和较大,表明参数被频繁更新,需要降低其学习率。因此梯度下降算法改写为: -$$ -m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ -V_t=V_{t-1}+\nabla^2f(w_t)\\\\ -w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t -$$ -然而,由于$V_t$关于$t$单调递增,可能导致训练后期学习率过低,参数无法收敛至最优。因此将$V_t$也改为指数移动平均值,避免了上述缺陷: -$$ -m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\\\ -V_t=\beta_2\cdot V_{t-1}+(1-\beta_2)\cdot\nabla^2f(w_t)\\\\ -w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t -$$ -使用`Adam`算法的训练结果如下: - -``` -[0] Accuracy: 0.9657 -[1] Accuracy: 0.9724 -[2] Accuracy: 0.9759 -[3] Accuracy: 0.9769 -[4] Accuracy: 0.9788 -[5] Accuracy: 0.9778 -[6] Accuracy: 0.9775 -[7] Accuracy: 0.9759 -[8] Accuracy: 0.9786 -[9] Accuracy: 0.9779 -``` - - - +# PRML-2021 Assignment2 + +姓名:夏海淞 + 
+学号:18307130090 + +## 简述 + +在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了`Momentum`和`Adam`优化算法,并比较了它们的性能。 + +## 算子的反向传播 + +### `Matmul` + +`Matmul`的计算公式为: +$$ +Y=X\times W +$$ +其中$Y,X,W$分别为$n\times d',n\times d,d\times d'$的矩阵。 + +由[神经网络与深度学习-邱锡鹏](https://nndl.github.io/nndl-book.pdf)中公式(B.20)和(B.21),有 +$$ +\frac{\partial Y}{\partial W}=\frac{\partial(X\times W)}{\partial W}=X^T\\\\ +\frac{\partial Y}{\partial X}=\frac{\partial(X\times W)}{\partial X}=W^T +$$ +结合链式法则和矩阵运算法则,可得 +$$ +\nabla_X=\nabla_Y\times W^T\\\\ +\nabla_W=X^T\times \nabla_Y +$$ + +### `Relu` + +`Relu`的计算公式为: +$$ +Y_{ij}=\begin{cases} +X_{ij}&X_{ij}\ge0\\\\ +0&\text{otherwise} +\end{cases} +$$ +因此有 +$$ +\frac{\partial Y_{ij}}{\partial X_{ij}}=\begin{cases} +1&X_{ij}>0\\\\ +0&\text{otherwise} +\end{cases} +$$ +结合链式法则,得到反向传播的计算公式: +$$ + +\nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} +$$ + +### `Log` + +`Log`的计算公式为 +$$ +Y_{ij}=\ln(X_{ij}+\epsilon),\epsilon=10^{-12} +$$ +因此有 +$$ +\frac{\partial Y_{ij}}{\partial X_{ij}}=\frac1{X_{ij}+\epsilon} +$$ +结合链式法则,得到反向传播的计算公式: +$$ + +\nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial {X_{ij}}} +$$ + +### `Softmax` + +`Softmax`的计算公式为 +$$ +Y_{ij}=\frac{\exp\{X_{ij} \}}{\sum_{k=1}^c\exp\{X_{ik} \}} +$$ +其中$Y,X$均为$N\times c$的矩阵。容易发现`Softmax`以$X$的每行作为单位进行运算。因此对于$X,Y$的行分量$X_k,Y_k$,有 +$$ +\frac{\partial Y_{ki}}{\partial X_{kj}}=\begin{cases} +\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\\\ +-\frac{\exp\{X_{ki} \}\exp\{X_{kj} \}}{(\sum_t\exp\{X_{kt}\})^2}=-Y_{ki}Y_{kj}&i\not=j +\end{cases} +$$ +因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\partial Y_{ki}}{\partial X_{kj}}$。结合链式法则,可得 +$$ +\nabla_X=\nabla_Y\times J +$$ +将行分量组合起来,就得到了反向传播的最终结果。 + +## 模型构建与训练 + +### 模型构建 + +#### `forward` + +参考`torch_mnist.py`中`TorchModel`方法的模型,使用如下代码构建: + +```python +def forward(self, x): + x = x.reshape(-1, 28 * 28) + + x = 
self.relu_1.forward(self.matmul_1.forward(x, self.W1)) + x = self.relu_2.forward(self.matmul_2.forward(x, self.W2)) + x = self.matmul_3.forward(x, self.W3) + + x = self.log.forward(self.softmax.forward(x)) + + return x +``` + +模型的计算图如下: + +![](./img/fnn_model.png) + +#### `backward` + +根据模型的计算图,按照反向的计算顺序依次调用对应算子的反向传播算法即可。 + +```python +def backward(self, y): + self.log_grad = self.log.backward(y) + self.softmax_grad = self.softmax.backward(self.log_grad) + self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad) + self.relu_2_grad = self.relu_2.backward(self.x3_grad) + self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad) + self.relu_1_grad = self.relu_1.backward(self.x2_grad) + self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad) + + return self.x1_grad +``` + +#### `mini_batch` + +`mini_batch`的作用是提高模型的训练速度,同时得到较好的优化效果。传统的批处理方法对整个数据集计算平均的损失函数值,随后计算相应梯度进行反向传播。当训练数据集容量较大时,对训练速度造成严重影响;而随机方法则对数据集的每个样本计算损失函数值,随后计算相应梯度进行反向传播。此时数据集容量不对训练速度产生影响,然而由于样本的随机性,可能导致参数无法收敛到最优值,在最优值附近震荡。因此一个折中的方法是将数据集划分为若干批次,在提高训练速度的同时保证了较好的收敛效果。 + +在本次实验中,我参照`utils.py`中的`mini_batch`,在`numpy_mnist.py`中重新实现了`mini_batch`方法: + +```python +def mini_batch(dataset, batch_size=128): + data = np.array([np.array(each[0]) for each in dataset]) + label = np.array([each[1] for each in dataset]) + + size = data.shape[0] + index = np.arange(size) + np.random.shuffle(index) + + return [(data[index[i:i + batch_size]], label[index[i:i + batch_size]]) for i in range(0, size, batch_size)] +``` + +### 模型训练 + +设定`learning_rate=0.1`,`batch_size=128`,`epoch_number=10`。训练结果如下: + +``` +[0] Accuracy: 0.9486 +[1] Accuracy: 0.9643 +[2] Accuracy: 0.9724 +[3] Accuracy: 0.9738 +[4] Accuracy: 0.9781 +[5] Accuracy: 0.9768 +[6] Accuracy: 0.9796 +[7] Accuracy: 0.9802 +[8] Accuracy: 0.9800 +[9] Accuracy: 0.9796 +``` + + + +尝试缩减`batch_size`的大小,设定`batch_size=64`。训练结果如下: + +``` +[0] Accuracy: 0.9597 +[1] Accuracy: 0.9715 +[2] Accuracy: 0.9739 +[3] Accuracy: 0.9771 +[4] Accuracy: 0.9775 
+[5] Accuracy: 0.9803 +[6] Accuracy: 0.9808 +[7] Accuracy: 0.9805 +[8] Accuracy: 0.9805 +[9] Accuracy: 0.9716 +``` + + + +尝试降低`learning_rate`,设定`learning_rate=0.01`。训练结果如下: + +``` +[0] Accuracy: 0.8758 +[1] Accuracy: 0.9028 +[2] Accuracy: 0.9143 +[3] Accuracy: 0.9234 +[4] Accuracy: 0.9298 +[5] Accuracy: 0.9350 +[6] Accuracy: 0.9397 +[7] Accuracy: 0.9434 +[8] Accuracy: 0.9459 +[9] Accuracy: 0.9501 +``` + + + +根据实验结果,可以得出以下结论: + +当学习率和批处理容量合适时,参数的收敛速度随着学习率的减小而减小,而参数的震荡幅度随着批处理容量的减小而增大。 + +## 梯度下降算法的改进 + +传统的梯度下降算法可以表述为: +$$ +w_{t+1}=w_t-\eta\cdot\nabla f(w_t) +$$ +尽管梯度下降作为优化算法被广泛使用,它依然存在一些缺点,主要表现为: + +- 参数修正方向完全由当前梯度决定,导致当学习率过高时参数可能在最优点附近震荡; +- 学习率无法随着训练进度改变,导致训练前期收敛速度较慢,后期可能无法收敛。 + +针对上述缺陷,产生了许多梯度下降算法的改进算法。其中较为典型的是`Momentum`算法和`Adam`算法。 + +### `Momentum` + +针对“参数修正方向完全由当前梯度决定”的问题,`Momentum`引入了“动量”的概念。 + +类比现实世界,当小球从高处向低处滚动时,其运动方向不仅与当前位置的“陡峭程度”相关,也和当前的速度,即先前位置的“陡峭程度”相关。因此在`Momentum`算法中,参数的修正值不是取决于当前梯度,而是取决于梯度的各时刻的指数移动平均值: +$$ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ +w_{t+1}=w_t-\eta\cdot m_t +$$ +指数移动平均值反映了参数调整时的“惯性”。当参数调整方向正确时,`Momentum`有助于加快训练速度,减少震荡的幅度;然而当参数调整方向错误时,`Momentum`会因为无法及时调整方向造成性能上的部分损失。 + +使用`Momentum`算法的训练结果如下: + +``` +[0] Accuracy: 0.9444 +[1] Accuracy: 0.9627 +[2] Accuracy: 0.9681 +[3] Accuracy: 0.9731 +[4] Accuracy: 0.9765 +[5] Accuracy: 0.9755 +[6] Accuracy: 0.9768 +[7] Accuracy: 0.9790 +[8] Accuracy: 0.9794 +[9] Accuracy: 0.9819 +``` + + + +可以看出相较传统的梯度下降算法并无明显优势。 + +### `Adam` + +针对“学习率无法随着训练进度改变”的问题,`Adam`在`Momentum`的基础上引入了“二阶动量”的概念。 + +`Adam`的改进思路为:由于神经网络中存在大量参数,不同参数的调整频率存在差别。对于频繁更新的参数,我们希望适当降低其学习率,提高收敛概率;而对于其他参数,我们希望适当增大其学习率,加快收敛速度。同时,参数的调整频率可能发生动态改变,我们也希望学习率能够随之动态调整。 + +因为参数的调整值与当前梯度直接相关,因此取历史梯度的平方和作为衡量参数调整频率的标准。如果历史梯度平方和较大,表明参数被频繁更新,需要降低其学习率。因此梯度下降算法改写为: +$$ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ +V_t=V_{t-1}+\nabla^2f(w_t)\\\\ +w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t +$$ +然而,由于$V_t$关于$t$单调递增,可能导致训练后期学习率过低,参数无法收敛至最优。因此将$V_t$也改为指数移动平均值,避免了上述缺陷: +$$ +m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\\\ 
+V_t=\beta_2\cdot V_{t-1}+(1-\beta_2)\cdot\nabla^2f(w_t)\\\\ +w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t +$$ +使用`Adam`算法的训练结果如下: + +``` +[0] Accuracy: 0.9657 +[1] Accuracy: 0.9724 +[2] Accuracy: 0.9759 +[3] Accuracy: 0.9769 +[4] Accuracy: 0.9788 +[5] Accuracy: 0.9778 +[6] Accuracy: 0.9775 +[7] Accuracy: 0.9759 +[8] Accuracy: 0.9786 +[9] Accuracy: 0.9779 +``` + + + 可以看出相较传统的梯度下降算法,损失函数值的震荡幅度有所减小,而收敛速度与传统方法相当。 \ No newline at end of file -- Gitee From 4713d01a3fb01397a8f20bd4e6103d0d22394ea7 Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Wed, 28 Apr 2021 10:09:24 +0800 Subject: [PATCH 7/8] modify README --- assignment-2/submission/18307130090/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/assignment-2/submission/18307130090/README.md b/assignment-2/submission/18307130090/README.md index 2420cc9..c42d425 100644 --- a/assignment-2/submission/18307130090/README.md +++ b/assignment-2/submission/18307130090/README.md @@ -46,6 +46,8 @@ $$ \end{cases} $$ 结合链式法则,得到反向传播的计算公式: + + $$ \nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} @@ -62,6 +64,8 @@ $$ \frac{\partial Y_{ij}}{\partial X_{ij}}=\frac1{X_{ij}+\epsilon} $$ 结合链式法则,得到反向传播的计算公式: + + $$ \nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial {X_{ij}}} -- Gitee From 1fbc451b23fd6db0771e7ac64bf61790065727f6 Mon Sep 17 00:00:00 2001 From: xhs7700 Date: Wed, 28 Apr 2021 10:11:36 +0800 Subject: [PATCH 8/8] modify README --- assignment-2/submission/18307130090/README.md | 562 +++++++++--------- 1 file changed, 275 insertions(+), 287 deletions(-) diff --git a/assignment-2/submission/18307130090/README.md b/assignment-2/submission/18307130090/README.md index c42d425..647eb99 100644 --- a/assignment-2/submission/18307130090/README.md +++ b/assignment-2/submission/18307130090/README.md @@ -1,288 +1,276 @@ -# PRML-2021 Assignment2 - -姓名:夏海淞 - -学号:18307130090 - -## 简述 - 
-在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了`Momentum`和`Adam`优化算法,并比较了它们的性能。 - -## 算子的反向传播 - -### `Matmul` - -`Matmul`的计算公式为: -$$ -Y=X\times W -$$ -其中$Y,X,W$分别为$n\times d',n\times d,d\times d'$的矩阵。 - -由[神经网络与深度学习-邱锡鹏](https://nndl.github.io/nndl-book.pdf)中公式(B.20)和(B.21),有 -$$ -\frac{\partial Y}{\partial W}=\frac{\partial(X\times W)}{\partial W}=X^T\\\\ -\frac{\partial Y}{\partial X}=\frac{\partial(X\times W)}{\partial X}=W^T -$$ -结合链式法则和矩阵运算法则,可得 -$$ -\nabla_X=\nabla_Y\times W^T\\\\ -\nabla_W=X^T\times \nabla_Y -$$ - -### `Relu` - -`Relu`的计算公式为: -$$ -Y_{ij}=\begin{cases} -X_{ij}&X_{ij}\ge0\\\\ -0&\text{otherwise} -\end{cases} -$$ -因此有 -$$ -\frac{\partial Y_{ij}}{\partial X_{ij}}=\begin{cases} -1&X_{ij}>0\\\\ -0&\text{otherwise} -\end{cases} -$$ -结合链式法则,得到反向传播的计算公式: - - -$$ - -\nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}} -$$ - -### `Log` - -`Log`的计算公式为 -$$ -Y_{ij}=\ln(X_{ij}+\epsilon),\epsilon=10^{-12} -$$ -因此有 -$$ -\frac{\partial Y_{ij}}{\partial X_{ij}}=\frac1{X_{ij}+\epsilon} -$$ -结合链式法则,得到反向传播的计算公式: - - -$$ - -\nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial {X_{ij}}} -$$ - -### `Softmax` - -`Softmax`的计算公式为 -$$ -Y_{ij}=\frac{\exp\{X_{ij} \}}{\sum_{k=1}^c\exp\{X_{ik} \}} -$$ -其中$Y,X$均为$N\times c$的矩阵。容易发现`Softmax`以$X$的每行作为单位进行运算。因此对于$X,Y$的行分量$X_k,Y_k$,有 -$$ -\frac{\partial Y_{ki}}{\partial X_{kj}}=\begin{cases} -\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\\\ --\frac{\exp\{X_{ki} \}\exp\{X_{kj} \}}{(\sum_t\exp\{X_{kt}\})^2}=-Y_{ki}Y_{kj}&i\not=j -\end{cases} -$$ -因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\partial Y_{ki}}{\partial X_{kj}}$。结合链式法则,可得 -$$ -\nabla_X=\nabla_Y\times J -$$ -将行分量组合起来,就得到了反向传播的最终结果。 - -## 模型构建与训练 - -### 模型构建 - -#### `forward` - -参考`torch_mnist.py`中`TorchModel`方法的模型,使用如下代码构建: - -```python -def forward(self, x): - x = x.reshape(-1, 28 * 28) - - x = 
self.relu_1.forward(self.matmul_1.forward(x, self.W1)) - x = self.relu_2.forward(self.matmul_2.forward(x, self.W2)) - x = self.matmul_3.forward(x, self.W3) - - x = self.log.forward(self.softmax.forward(x)) - - return x -``` - -模型的计算图如下: - -![](./img/fnn_model.png) - -#### `backward` - -根据模型的计算图,按照反向的计算顺序依次调用对应算子的反向传播算法即可。 - -```python -def backward(self, y): - self.log_grad = self.log.backward(y) - self.softmax_grad = self.softmax.backward(self.log_grad) - self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad) - self.relu_2_grad = self.relu_2.backward(self.x3_grad) - self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad) - self.relu_1_grad = self.relu_1.backward(self.x2_grad) - self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad) - - return self.x1_grad -``` - -#### `mini_batch` - -`mini_batch`的作用是提高模型的训练速度,同时得到较好的优化效果。传统的批处理方法对整个数据集计算平均的损失函数值,随后计算相应梯度进行反向传播。当训练数据集容量较大时,对训练速度造成严重影响;而随机方法则对数据集的每个样本计算损失函数值,随后计算相应梯度进行反向传播。此时数据集容量不对训练速度产生影响,然而由于样本的随机性,可能导致参数无法收敛到最优值,在最优值附近震荡。因此一个折中的方法是将数据集划分为若干批次,在提高训练速度的同时保证了较好的收敛效果。 - -在本次实验中,我参照`utils.py`中的`mini_batch`,在`numpy_mnist.py`中重新实现了`mini_batch`方法: - -```python -def mini_batch(dataset, batch_size=128): - data = np.array([np.array(each[0]) for each in dataset]) - label = np.array([each[1] for each in dataset]) - - size = data.shape[0] - index = np.arange(size) - np.random.shuffle(index) - - return [(data[index[i:i + batch_size]], label[index[i:i + batch_size]]) for i in range(0, size, batch_size)] -``` - -### 模型训练 - -设定`learning_rate=0.1`,`batch_size=128`,`epoch_number=10`。训练结果如下: - -``` -[0] Accuracy: 0.9486 -[1] Accuracy: 0.9643 -[2] Accuracy: 0.9724 -[3] Accuracy: 0.9738 -[4] Accuracy: 0.9781 -[5] Accuracy: 0.9768 -[6] Accuracy: 0.9796 -[7] Accuracy: 0.9802 -[8] Accuracy: 0.9800 -[9] Accuracy: 0.9796 -``` - - - -尝试缩减`batch_size`的大小,设定`batch_size=64`。训练结果如下: - -``` -[0] Accuracy: 0.9597 -[1] Accuracy: 0.9715 -[2] Accuracy: 0.9739 -[3] Accuracy: 0.9771 -[4] Accuracy: 0.9775 
-[5] Accuracy: 0.9803 -[6] Accuracy: 0.9808 -[7] Accuracy: 0.9805 -[8] Accuracy: 0.9805 -[9] Accuracy: 0.9716 -``` - - - -尝试降低`learning_rate`,设定`learning_rate=0.01`。训练结果如下: - -``` -[0] Accuracy: 0.8758 -[1] Accuracy: 0.9028 -[2] Accuracy: 0.9143 -[3] Accuracy: 0.9234 -[4] Accuracy: 0.9298 -[5] Accuracy: 0.9350 -[6] Accuracy: 0.9397 -[7] Accuracy: 0.9434 -[8] Accuracy: 0.9459 -[9] Accuracy: 0.9501 -``` - - - -根据实验结果,可以得出以下结论: - -当学习率和批处理容量合适时,参数的收敛速度随着学习率的减小而减小,而参数的震荡幅度随着批处理容量的减小而增大。 - -## 梯度下降算法的改进 - -传统的梯度下降算法可以表述为: -$$ -w_{t+1}=w_t-\eta\cdot\nabla f(w_t) -$$ -尽管梯度下降作为优化算法被广泛使用,它依然存在一些缺点,主要表现为: - -- 参数修正方向完全由当前梯度决定,导致当学习率过高时参数可能在最优点附近震荡; -- 学习率无法随着训练进度改变,导致训练前期收敛速度较慢,后期可能无法收敛。 - -针对上述缺陷,产生了许多梯度下降算法的改进算法。其中较为典型的是`Momentum`算法和`Adam`算法。 - -### `Momentum` - -针对“参数修正方向完全由当前梯度决定”的问题,`Momentum`引入了“动量”的概念。 - -类比现实世界,当小球从高处向低处滚动时,其运动方向不仅与当前位置的“陡峭程度”相关,也和当前的速度,即先前位置的“陡峭程度”相关。因此在`Momentum`算法中,参数的修正值不是取决于当前梯度,而是取决于梯度的各时刻的指数移动平均值: -$$ -m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ -w_{t+1}=w_t-\eta\cdot m_t -$$ -指数移动平均值反映了参数调整时的“惯性”。当参数调整方向正确时,`Momentum`有助于加快训练速度,减少震荡的幅度;然而当参数调整方向错误时,`Momentum`会因为无法及时调整方向造成性能上的部分损失。 - -使用`Momentum`算法的训练结果如下: - -``` -[0] Accuracy: 0.9444 -[1] Accuracy: 0.9627 -[2] Accuracy: 0.9681 -[3] Accuracy: 0.9731 -[4] Accuracy: 0.9765 -[5] Accuracy: 0.9755 -[6] Accuracy: 0.9768 -[7] Accuracy: 0.9790 -[8] Accuracy: 0.9794 -[9] Accuracy: 0.9819 -``` - - - -可以看出相较传统的梯度下降算法并无明显优势。 - -### `Adam` - -针对“学习率无法随着训练进度改变”的问题,`Adam`在`Momentum`的基础上引入了“二阶动量”的概念。 - -`Adam`的改进思路为:由于神经网络中存在大量参数,不同参数的调整频率存在差别。对于频繁更新的参数,我们希望适当降低其学习率,提高收敛概率;而对于其他参数,我们希望适当增大其学习率,加快收敛速度。同时,参数的调整频率可能发生动态改变,我们也希望学习率能够随之动态调整。 - -因为参数的调整值与当前梯度直接相关,因此取历史梯度的平方和作为衡量参数调整频率的标准。如果历史梯度平方和较大,表明参数被频繁更新,需要降低其学习率。因此梯度下降算法改写为: -$$ -m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ -V_t=V_{t-1}+\nabla^2f(w_t)\\\\ -w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t -$$ -然而,由于$V_t$关于$t$单调递增,可能导致训练后期学习率过低,参数无法收敛至最优。因此将$V_t$也改为指数移动平均值,避免了上述缺陷: -$$ -m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\\\ 
-V_t=\beta_2\cdot V_{t-1}+(1-\beta_2)\cdot\nabla^2f(w_t)\\\\ -w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t -$$ -使用`Adam`算法的训练结果如下: - -``` -[0] Accuracy: 0.9657 -[1] Accuracy: 0.9724 -[2] Accuracy: 0.9759 -[3] Accuracy: 0.9769 -[4] Accuracy: 0.9788 -[5] Accuracy: 0.9778 -[6] Accuracy: 0.9775 -[7] Accuracy: 0.9759 -[8] Accuracy: 0.9786 -[9] Accuracy: 0.9779 -``` - - - +# PRML-2021 Assignment2 + +姓名:夏海淞 + +学号:18307130090 + +## 简述 + +在本次实验中,我通过`NumPy`实现了一个简单的前馈神经网络,其中包括`numpy_fnn.py`中算子的反向传播以及前馈神经网络模型的构建。为了验证模型效果,我在MNIST数据集上进行了训练和测试。此外,我还实现了`Momentum`和`Adam`优化算法,并比较了它们的性能。 + +## 算子的反向传播 + +### `Matmul` + +`Matmul`的计算公式为: +$$ +Y=X\times W +$$ +其中$Y,X,W$分别为$n\times d',n\times d,d\times d'$的矩阵。 + +由[神经网络与深度学习-邱锡鹏](https://nndl.github.io/nndl-book.pdf)中公式(B.20)和(B.21),有 +$$ +\frac{\partial Y}{\partial W}=\frac{\partial(X\times W)}{\partial W}=X^T\\\\ +\frac{\partial Y}{\partial X}=\frac{\partial(X\times W)}{\partial X}=W^T +$$ +结合链式法则和矩阵运算法则,可得 +$$ +\nabla_X=\nabla_Y\times W^T\\\\ +\nabla_W=X^T\times \nabla_Y +$$ + +### `Relu` + +`Relu`的计算公式为: +$$ +Y_{ij}=\begin{cases} +X_{ij}&X_{ij}\ge0\\\\ +0&\text{otherwise} +\end{cases} +$$ +因此有 +$$ +\frac{\partial Y_{ij}}{\partial X_{ij}}=\begin{cases} +1&X_{ij}>0\\\\ +0&\text{otherwise} +\end{cases} +$$ +结合链式法则,得到反向传播的计算公式:$\nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial X_{ij}}$ + +### `Log` + +`Log`的计算公式为 +$$ +Y_{ij}=\ln(X_{ij}+\epsilon),\epsilon=10^{-12} +$$ +因此有 +$$ +\frac{\partial Y_{ij}}{\partial X_{ij}}=\frac1{X_{ij}+\epsilon} +$$ +结合链式法则,得到反向传播的计算公式:$\nabla_{Xij}=\nabla_{Yij}\cdot\frac{\partial Y_{ij}}{\partial {X_{ij}}}$ + +### `Softmax` + +`Softmax`的计算公式为 +$$ +Y_{ij}=\frac{\exp\{X_{ij} \}}{\sum_{k=1}^c\exp\{X_{ik} \}} +$$ +其中$Y,X$均为$N\times c$的矩阵。容易发现`Softmax`以$X$的每行作为单位进行运算。因此对于$X,Y$的行分量$X_k,Y_k$,有 +$$ +\frac{\partial Y_{ki}}{\partial X_{kj}}=\begin{cases} +\frac{\exp\{X_{kj} \}(\sum_t\exp\{X_{kt}\})-\exp\{2X_{ki}\}}{(\sum_t\exp\{X_{kt}\})^2}=Y_{ki}(1-Y_{ki})&i=j\\\\ +-\frac{\exp\{X_{ki} \}\exp\{X_{kj} 
\}}{(\sum_t\exp\{X_{kt}\})^2}=-Y_{ki}Y_{kj}&i\not=j +\end{cases} +$$ +因此可计算得到$X_k,Y_k$的Jacob矩阵,满足$J_{ij}=\frac{\partial Y_{ki}}{\partial X_{kj}}$。结合链式法则,可得 +$$ +\nabla_X=\nabla_Y\times J +$$ +将行分量组合起来,就得到了反向传播的最终结果。 + +## 模型构建与训练 + +### 模型构建 + +#### `forward` + +参考`torch_mnist.py`中`TorchModel`方法的模型,使用如下代码构建: + +```python +def forward(self, x): + x = x.reshape(-1, 28 * 28) + + x = self.relu_1.forward(self.matmul_1.forward(x, self.W1)) + x = self.relu_2.forward(self.matmul_2.forward(x, self.W2)) + x = self.matmul_3.forward(x, self.W3) + + x = self.log.forward(self.softmax.forward(x)) + + return x +``` + +模型的计算图如下: + +![](./img/fnn_model.png) + +#### `backward` + +根据模型的计算图,按照反向的计算顺序依次调用对应算子的反向传播算法即可。 + +```python +def backward(self, y): + self.log_grad = self.log.backward(y) + self.softmax_grad = self.softmax.backward(self.log_grad) + self.x3_grad, self.W3_grad = self.matmul_3.backward(self.softmax_grad) + self.relu_2_grad = self.relu_2.backward(self.x3_grad) + self.x2_grad, self.W2_grad = self.matmul_2.backward(self.relu_2_grad) + self.relu_1_grad = self.relu_1.backward(self.x2_grad) + self.x1_grad, self.W1_grad = self.matmul_1.backward(self.relu_1_grad) + + return self.x1_grad +``` + +#### `mini_batch` + +`mini_batch`的作用是提高模型的训练速度,同时得到较好的优化效果。传统的批处理方法对整个数据集计算平均的损失函数值,随后计算相应梯度进行反向传播。当训练数据集容量较大时,对训练速度造成严重影响;而随机方法则对数据集的每个样本计算损失函数值,随后计算相应梯度进行反向传播。此时数据集容量不对训练速度产生影响,然而由于样本的随机性,可能导致参数无法收敛到最优值,在最优值附近震荡。因此一个折中的方法是将数据集划分为若干批次,在提高训练速度的同时保证了较好的收敛效果。 + +在本次实验中,我参照`utils.py`中的`mini_batch`,在`numpy_mnist.py`中重新实现了`mini_batch`方法: + +```python +def mini_batch(dataset, batch_size=128): + data = np.array([np.array(each[0]) for each in dataset]) + label = np.array([each[1] for each in dataset]) + + size = data.shape[0] + index = np.arange(size) + np.random.shuffle(index) + + return [(data[index[i:i + batch_size]], label[index[i:i + batch_size]]) for i in range(0, size, batch_size)] +``` + +### 模型训练 + +设定`learning_rate=0.1`,`batch_size=128`,`epoch_number=10`。训练结果如下: + +``` +[0] Accuracy: 
0.9486 +[1] Accuracy: 0.9643 +[2] Accuracy: 0.9724 +[3] Accuracy: 0.9738 +[4] Accuracy: 0.9781 +[5] Accuracy: 0.9768 +[6] Accuracy: 0.9796 +[7] Accuracy: 0.9802 +[8] Accuracy: 0.9800 +[9] Accuracy: 0.9796 +``` + + + +尝试缩减`batch_size`的大小,设定`batch_size=64`。训练结果如下: + +``` +[0] Accuracy: 0.9597 +[1] Accuracy: 0.9715 +[2] Accuracy: 0.9739 +[3] Accuracy: 0.9771 +[4] Accuracy: 0.9775 +[5] Accuracy: 0.9803 +[6] Accuracy: 0.9808 +[7] Accuracy: 0.9805 +[8] Accuracy: 0.9805 +[9] Accuracy: 0.9716 +``` + + + +尝试降低`learning_rate`,设定`learning_rate=0.01`。训练结果如下: + +``` +[0] Accuracy: 0.8758 +[1] Accuracy: 0.9028 +[2] Accuracy: 0.9143 +[3] Accuracy: 0.9234 +[4] Accuracy: 0.9298 +[5] Accuracy: 0.9350 +[6] Accuracy: 0.9397 +[7] Accuracy: 0.9434 +[8] Accuracy: 0.9459 +[9] Accuracy: 0.9501 +``` + + + +根据实验结果,可以得出以下结论: + +当学习率和批处理容量合适时,参数的收敛速度随着学习率的减小而减小,而参数的震荡幅度随着批处理容量的减小而增大。 + +## 梯度下降算法的改进 + +传统的梯度下降算法可以表述为: +$$ +w_{t+1}=w_t-\eta\cdot\nabla f(w_t) +$$ +尽管梯度下降作为优化算法被广泛使用,它依然存在一些缺点,主要表现为: + +- 参数修正方向完全由当前梯度决定,导致当学习率过高时参数可能在最优点附近震荡; +- 学习率无法随着训练进度改变,导致训练前期收敛速度较慢,后期可能无法收敛。 + +针对上述缺陷,产生了许多梯度下降算法的改进算法。其中较为典型的是`Momentum`算法和`Adam`算法。 + +### `Momentum` + +针对“参数修正方向完全由当前梯度决定”的问题,`Momentum`引入了“动量”的概念。 + +类比现实世界,当小球从高处向低处滚动时,其运动方向不仅与当前位置的“陡峭程度”相关,也和当前的速度,即先前位置的“陡峭程度”相关。因此在`Momentum`算法中,参数的修正值不是取决于当前梯度,而是取决于梯度的各时刻的指数移动平均值: +$$ +m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\ +w_{t+1}=w_t-\eta\cdot m_t +$$ +指数移动平均值反映了参数调整时的“惯性”。当参数调整方向正确时,`Momentum`有助于加快训练速度,减少震荡的幅度;然而当参数调整方向错误时,`Momentum`会因为无法及时调整方向造成性能上的部分损失。 + +使用`Momentum`算法的训练结果如下: + +``` +[0] Accuracy: 0.9444 +[1] Accuracy: 0.9627 +[2] Accuracy: 0.9681 +[3] Accuracy: 0.9731 +[4] Accuracy: 0.9765 +[5] Accuracy: 0.9755 +[6] Accuracy: 0.9768 +[7] Accuracy: 0.9790 +[8] Accuracy: 0.9794 +[9] Accuracy: 0.9819 +``` + + + +可以看出相较传统的梯度下降算法并无明显优势。 + +### `Adam` + +针对“学习率无法随着训练进度改变”的问题,`Adam`在`Momentum`的基础上引入了“二阶动量”的概念。 + 
+`Adam`的改进思路为:由于神经网络中存在大量参数,不同参数的调整频率存在差别。对于频繁更新的参数,我们希望适当降低其学习率,提高收敛概率;而对于其他参数,我们希望适当增大其学习率,加快收敛速度。同时,参数的调整频率可能发生动态改变,我们也希望学习率能够随之动态调整。
+
+因为参数的调整值与当前梯度直接相关,因此取历史梯度的平方和作为衡量参数调整频率的标准。如果历史梯度平方和较大,表明参数被频繁更新,需要降低其学习率。因此梯度下降算法改写为:
+$$
+m_t=\beta\cdot m_{t-1}+(1-\beta)\cdot\nabla f(w_t)\\\\
+V_t=V_{t-1}+(\nabla f(w_t))^2\\\\
+w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t
+$$
+其中$(\nabla f(w_t))^2$表示对梯度逐元素取平方。然而,由于$V_t$关于$t$单调递增,可能导致训练后期学习率过低,参数无法收敛至最优。因此将$V_t$也改为指数移动平均值,避免了上述缺陷:
+$$
+m_t=\beta_1\cdot m_{t-1}+(1-\beta_1)\cdot\nabla f(w_t)\\\\
+V_t=\beta_2\cdot V_{t-1}+(1-\beta_2)\cdot(\nabla f(w_t))^2\\\\
+w_{t+1}=w_t-\frac\eta{\sqrt{V_t}}\cdot m_t
+$$
+(注:完整的`Adam`算法通常还会在分母中加入微小常数$\epsilon$以避免除零,并对$m_t,V_t$进行偏差修正;此处为简化表述省略了这两个细节。)
+
+使用`Adam`算法的训练结果如下:
+
+```
+[0] Accuracy: 0.9657
+[1] Accuracy: 0.9724
+[2] Accuracy: 0.9759
+[3] Accuracy: 0.9769
+[4] Accuracy: 0.9788
+[5] Accuracy: 0.9778
+[6] Accuracy: 0.9775
+[7] Accuracy: 0.9759
+[8] Accuracy: 0.9786
+[9] Accuracy: 0.9779
+```
+
+
+
 可以看出相较传统的梯度下降算法,损失函数值的震荡幅度有所减小,而收敛速度与传统方法相当。
\ No newline at end of file
-- 
Gitee