From 9159f649b965f7dfbd53e5ef834c1b4145c24ba6 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Tue, 26 Dec 2017 13:33:17 -0500 Subject: [PATCH 001/329] python 3 --- unsupervised_class2/__init__.py | 0 unsupervised_class2/autoencoder.py | 87 +++++++++++++++-------- unsupervised_class2/rbm.py | 31 ++++---- unsupervised_class2/tsne_books.py | 14 ++-- unsupervised_class2/tsne_donut.py | 15 ++-- unsupervised_class2/tsne_mnist.py | 22 ++++-- unsupervised_class2/tsne_xor.py | 5 ++ unsupervised_class2/unsupervised.py | 48 ++++++++----- unsupervised_class2/util.py | 8 ++- unsupervised_class2/vanishing.py | 45 ++++++------ unsupervised_class2/visualize_features.py | 31 ++++---- unsupervised_class2/xwing.py | 42 ++++++----- 12 files changed, 216 insertions(+), 132 deletions(-) create mode 100644 unsupervised_class2/__init__.py diff --git a/unsupervised_class2/__init__.py b/unsupervised_class2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/unsupervised_class2/autoencoder.py b/unsupervised_class2/autoencoder.py index 7c1599d8..87633fa1 100644 --- a/unsupervised_class2/autoencoder.py +++ b/unsupervised_class2/autoencoder.py @@ -14,19 +14,42 @@ from util import relu, error_rate, getKaggleMNIST, init_weights +def T_shared_zeros_like32(p): + # p is a Theano shared itself + return theano.shared(np.zeros_like(p.get_value(), dtype=np.float32)) + +def momentum_updates(cost, params, mu, learning_rate): + # momentum changes + dparams = [T_shared_zeros_like32(p) for p in params] + + updates = [] + grads = T.grad(cost, params) + for p, dp, g in zip(params, dparams, grads): + dp_update = mu*dp - learning_rate*g + p_update = p + dp_update + + updates.append((dp, dp_update)) + updates.append((p, p_update)) + return updates + + class AutoEncoder(object): def __init__(self, M, an_id): self.M = M self.id = an_id def fit(self, X, learning_rate=0.5, mu=0.99, epochs=1, batch_sz=100, show_fig=False): + # cast to float + mu = np.float32(mu) + learning_rate = np.float32(learning_rate) + N, D = X.shape n_batches = N // batch_sz W0 = init_weights((D, self.M)) self.W = theano.shared(W0, 'W_%s' % self.id) - self.bh = theano.shared(np.zeros(self.M), 'bh_%s' % self.id) - self.bo = theano.shared(np.zeros(D), 'bo_%s' % self.id) + self.bh = theano.shared(np.zeros(self.M, dtype=np.float32), 'bh_%s' % self.id) + self.bo = theano.shared(np.zeros(D, dtype=np.float32), 'bo_%s' % self.id) self.params = [self.W, self.bh, self.bo] self.forward_params = [self.W, self.bh] @@ -61,11 +84,9 @@ def fit(self, X, learning_rate=0.5, mu=0.99, epochs=1, batch_sz=100, show_fig=Fa outputs=cost, ) - updates = [ - (p, p + mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, self.dparams) - ] + [ - (dp, mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, self.dparams) - ] + + + updates = momentum_updates(cost, self.params, mu, learning_rate) train_op = theano.function( inputs=[X_in], updates=updates, @@ -73,6 +94,7 @@ def fit(self, X, learning_rate=0.5, mu=0.99, epochs=1, batch_sz=100, show_fig=Fa costs = [] print("training autoencoder: %s" % self.id) + print("epochs to do:", epochs) for i in range(epochs): print("epoch:", i) X = shuffle(X) @@ -117,9 +139,22 @@ def __init__(self, hidden_layer_sizes, UnsupervisedModel=AutoEncoder): count += 1 - def fit(self, X, Y, Xtest, Ytest, pretrain=True, learning_rate=0.01, mu=0.99, reg=0.1, epochs=1, batch_sz=100): + def fit(self, X, Y, Xtest, Ytest, + pretrain=True, + train_head_only=False, + learning_rate=0.1, + mu=0.99, + reg=0.0, + epochs=1, + 
batch_sz=100): + + # cast to float32 + learning_rate = np.float32(learning_rate) + mu = np.float32(mu) + reg = np.float32(reg) + # greedy layer-wise training of autoencoders - pretrain_epochs = 1 + pretrain_epochs = 2 if not pretrain: pretrain_epochs = 0 @@ -135,38 +170,27 @@ def fit(self, X, Y, Xtest, Ytest, pretrain=True, learning_rate=0.01, mu=0.99, re K = len(set(Y)) W0 = init_weights((self.hidden_layers[-1].M, K)) self.W = theano.shared(W0, "W_logreg") - self.b = theano.shared(np.zeros(K), "b_logreg") + self.b = theano.shared(np.zeros(K, dtype=np.float32), "b_logreg") self.params = [self.W, self.b] - for ae in self.hidden_layers: - self.params += ae.forward_params - - # for momentum - self.dW = theano.shared(np.zeros(W0.shape), "dW_logreg") - self.db = theano.shared(np.zeros(K), "db_logreg") - self.dparams = [self.dW, self.db] - for ae in self.hidden_layers: - self.dparams += ae.forward_dparams + if not train_head_only: + for ae in self.hidden_layers: + self.params += ae.forward_params X_in = T.matrix('X_in') targets = T.ivector('Targets') pY = self.forward(X_in) - # squared_magnitude = [(p*p).sum() for p in self.params] - # reg_cost = T.sum(squared_magnitude) - cost = -T.mean( T.log(pY[T.arange(pY.shape[0]), targets]) ) #+ reg*reg_cost + squared_magnitude = [(p*p).sum() for p in self.params] + reg_cost = T.sum(squared_magnitude) + cost = -T.mean( T.log(pY[T.arange(pY.shape[0]), targets]) ) + reg*reg_cost prediction = self.predict(X_in) cost_predict_op = theano.function( inputs=[X_in, targets], outputs=[cost, prediction], ) - updates = [ - (p, p + mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, self.dparams) - ] + [ - (dp, mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, self.dparams) - ] - # updates = [(p, p - learning_rate*T.grad(cost, p)) for p in self.params] + updates = momentum_updates(cost, self.params, mu, learning_rate) train_op = theano.function( inputs=[X_in, targets], updates=updates, @@ -209,7 +233,8 @@ def main(): # dnn.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=3) # vs dnn = DNN([1000, 750, 500]) - dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=False, epochs=10) + dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, train_head_only=False, epochs=3) + # note: try training the head only too! what does that mean? 
def test_single_autoencoder(): @@ -239,5 +264,5 @@ def test_single_autoencoder(): if __name__ == '__main__': - # main() - test_single_autoencoder() + main() + # test_single_autoencoder() diff --git a/unsupervised_class2/rbm.py b/unsupervised_class2/rbm.py index 3e45d178..2f3f9991 100644 --- a/unsupervised_class2/rbm.py +++ b/unsupervised_class2/rbm.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import theano import theano.tensor as T @@ -18,8 +23,12 @@ def __init__(self, M, an_id): self.rng = RandomStreams() def fit(self, X, learning_rate=0.1, epochs=1, batch_sz=100, show_fig=False): + # cast to float32 + learning_rate = np.float32(learning_rate) + + N, D = X.shape - n_batches = N / batch_sz + n_batches = N // batch_sz W0 = init_weights((D, self.M)) self.W = theano.shared(W0, 'W_%s' % self.id) @@ -28,14 +37,6 @@ def fit(self, X, learning_rate=0.1, epochs=1, batch_sz=100, show_fig=False): self.params = [self.W, self.c, self.b] self.forward_params = [self.W, self.c] - # we won't use this to fit the RBM but we will use these for backpropagation later - # TODO: technically they should be reset before doing backprop - self.dW = theano.shared(np.zeros(W0.shape), 'dW_%s' % self.id) - self.dc = theano.shared(np.zeros(self.M), 'dbh_%s' % self.id) - self.db = theano.shared(np.zeros(D), 'dbo_%s' % self.id) - self.dparams = [self.dW, self.dc, self.db] - self.forward_dparams = [self.dW, self.dc] - X_in = T.matrix('X_%s' % self.id) # attach it to the object so it can be used later @@ -50,7 +51,7 @@ def fit(self, X, learning_rate=0.1, epochs=1, batch_sz=100, show_fig=False): # but we would like to see how this cost function changes # as we do contrastive divergence X_hat = self.forward_output(X_in) - cost = -(X_in * T.log(X_hat) + (1 - X_in) * T.log(1 - X_hat)).sum() / (batch_sz * D) + cost = -(X_in * T.log(X_hat) + (1 - X_in) * T.log(1 - X_hat)).mean() cost_op = theano.function( inputs=[X_in], outputs=cost, @@ -71,15 +72,15 @@ def fit(self, X, learning_rate=0.1, epochs=1, batch_sz=100, show_fig=False): ) costs = [] - print "training rbm: %s" % self.id - for i in xrange(epochs): - print "epoch:", i + print("training rbm: %s" % self.id) + for i in range(epochs): + print("epoch:", i) X = shuffle(X) - for j in xrange(n_batches): + for j in range(n_batches): batch = X[j*batch_sz:(j*batch_sz + batch_sz)] train_op(batch) the_cost = cost_op(X) # technically we could also get the cost for Xtest here - print "j / n_batches:", j, "/", n_batches, "cost:", the_cost + print("j / n_batches:", j, "/", n_batches, "cost:", the_cost) costs.append(the_cost) if show_fig: plt.plot(costs) diff --git a/unsupervised_class2/tsne_books.py b/unsupervised_class2/tsne_books.py index 8253abbd..d31b8f2a 100644 --- a/unsupervised_class2/tsne_books.py +++ b/unsupervised_class2/tsne_books.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import nltk import numpy as np import matplotlib.pyplot as plt @@ -39,6 +44,7 @@ def my_tokenizer(s): for title in titles: try: title = 
title.encode('ascii', 'ignore') # this will throw exception if bad characters + title = title.decode('utf-8') all_titles.append(title) tokens = my_tokenizer(title) all_tokens.append(tokens) @@ -47,8 +53,8 @@ def my_tokenizer(s): word_index_map[token] = current_index current_index += 1 index_word_map.append(token) - except: - pass + except Exception as e: + print(e) @@ -67,13 +73,13 @@ def tokens_to_vector(tokens): for tokens in all_tokens: X[:,i] = tokens_to_vector(tokens) i += 1 -print "X.shape:", X.shape +print("X.shape:", X.shape) def main(): tsne = TSNE(perplexity=40) Z = tsne.fit_transform(X) plt.scatter(Z[:,0], Z[:,1]) - for i in xrange(D): + for i in range(D): plt.annotate(s=index_word_map[i], xy=(Z[i,0], Z[i,1])) plt.show() diff --git a/unsupervised_class2/tsne_donut.py b/unsupervised_class2/tsne_donut.py index 0d62c531..5e0724f3 100644 --- a/unsupervised_class2/tsne_donut.py +++ b/unsupervised_class2/tsne_donut.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import matplotlib.pyplot as plt @@ -13,16 +18,16 @@ def get_donut_data(): # distance from origin is radius + random normal # angle theta is uniformly distributed between (0, 2pi) - R1 = np.random.randn(N/2) + R_inner - theta = 2*np.pi*np.random.random(N/2) + R1 = np.random.randn(N//2) + R_inner + theta = 2*np.pi*np.random.random(N//2) X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T - R2 = np.random.randn(N/2) + R_outer - theta = 2*np.pi*np.random.random(N/2) + R2 = np.random.randn(N//2) + R_outer + theta = 2*np.pi*np.random.random(N//2) X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T X = np.concatenate([ X_inner, X_outer ]) - Y = np.array([0]*(N/2) + [1]*(N/2)) + Y = np.array([0]*(N//2) + [1]*(N//2)) return X, Y diff --git a/unsupervised_class2/tsne_mnist.py b/unsupervised_class2/tsne_mnist.py index 4076f563..d2ee1239 100644 --- a/unsupervised_class2/tsne_mnist.py +++ b/unsupervised_class2/tsne_mnist.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import matplotlib.pyplot as plt @@ -10,7 +15,7 @@ import sys sys.path.append(os.path.abspath('..')) from unsupervised_class.kmeans_mnist import purity -from unsupervised_class.gmm import gmm +from sklearn.mixture import GaussianMixture def main(): @@ -26,10 +31,17 @@ def main(): plt.show() # purity measure from unsupervised machine learning pt 1 - _, Rfull = gmm(X, 10, max_iter=30, smoothing=10e-1) - print "full purity:", purity(Y, Rfull) - _, Rreduced = gmm(Z, 10, max_iter=30, smoothing=10e-1) - print "reduced purity:", purity(Y, Rreduced) + # maximum purity is 1, higher is better + gmm = GaussianMixture(n_components=10) + gmm.fit(X) + Rfull = gmm.predict_proba(X) + print("Rfull.shape:", Rfull.shape) + print("full purity:", purity(Y, Rfull)) + + # now try the same thing on the reduced data + gmm.fit(Z) + Rreduced = gmm.predict_proba(Z) + print("reduced purity:", purity(Y, Rreduced)) if __name__ == '__main__': main() \ No newline at end of file diff --git 
a/unsupervised_class2/tsne_xor.py b/unsupervised_class2/tsne_xor.py index dbdaabc9..58e59001 100644 --- a/unsupervised_class2/tsne_xor.py +++ b/unsupervised_class2/tsne_xor.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import matplotlib.pyplot as plt diff --git a/unsupervised_class2/unsupervised.py b/unsupervised_class2/unsupervised.py index ec5a2d3a..9b3485b6 100644 --- a/unsupervised_class2/unsupervised.py +++ b/unsupervised_class2/unsupervised.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import theano import theano.tensor as T @@ -10,7 +15,7 @@ from sklearn.decomposition import PCA from theano.tensor.shared_randomstreams import RandomStreams from util import relu, error_rate, getKaggleMNIST, init_weights -from autoencoder import AutoEncoder +from autoencoder import AutoEncoder, momentum_updates from rbm import RBM @@ -48,22 +53,31 @@ def fit_to_input(self, k, learning_rate=1.0, mu=0.99, epochs=100000): # layer, not just the last layer. # Exercise for students: modify this function to be able # to activate neurons in the middle layers. + + # cast hyperperams + learning_rate = np.float32(learning_rate) + mu = np.float32(mu) + + # randomly initialize an image X0 = init_weights((1, self.D)) + + # make the image a shared so theano can update it X = theano.shared(X0, 'X_shared') - dX = theano.shared(np.zeros(X0.shape), 'dX_shared') + + # get the output of the neural network Y = self.forward(X) + # t = np.zeros(self.hidden_layers[-1].M) # t[k] = 1 # # choose Y[0] b/c it's shape 1xD, we want just a D-size vector, not 1xD matrix - # cost = -(t*T.log(Y[0]) + (1 - t)*(T.log(1 - Y[0]))).sum() + reg*(X * X).sum() + # cost = -(t*T.log(Y[0]) + (1 - t)*(T.log(1 - Y[0]))).sum() - cost = -T.log(Y[0,k]) + reg*(X * X).sum() + # k = which output node to look at + # there is only 1 image, so we select the 0th row of X + cost = -T.log(Y[0,k]) - updates = [ - (X, X + mu*dX - learning_rate*T.grad(cost, X)), - (dX, mu*dX - learning_rate*T.grad(cost, X)), - ] + updates = momentum_updates(cost, [X], mu, learning_rate) train = theano.function( inputs=[], outputs=[cost, Y], @@ -71,12 +85,12 @@ def fit_to_input(self, k, learning_rate=1.0, mu=0.99, epochs=100000): ) costs = [] - for i in xrange(epochs): - if i % 1000 == 0: - print "epoch:", i + for i in range(epochs): + if i % 10000 == 0: + print("epoch:", i) the_cost, out = train() if i == 0: - print "out.shape:", out.shape + print("out.shape:", out.shape) costs.append(the_cost) plt.plot(costs) plt.show() @@ -93,7 +107,7 @@ def load(filename, UnsupervisedModel=AutoEncoder): npz = np.load(filename) dbn.hidden_layers = [] count = 0 - for i in xrange(0, len(npz.files), 3): + for i in range(0, len(npz.files), 3): W = npz['arr_%s' % i] bh = npz['arr_%s' % (i + 1)] bo = npz['arr_%s' % (i + 2)] @@ -112,26 +126,26 @@ def main(): dbn = DBN([1000, 750, 500], UnsupervisedModel=AutoEncoder) # dbn = DBN([1000, 750, 500, 10]) output = dbn.fit(Xtrain, pretrain_epochs=2) - print "output.shape", output.shape + 
print("output.shape", output.shape) # sample before using t-SNE because it requires lots of RAM sample_size = 600 tsne = TSNE() reduced = tsne.fit_transform(output[:sample_size]) plt.scatter(reduced[:,0], reduced[:,1], s=100, c=Ytrain[:sample_size], alpha=0.5) - plt.title("t-SNE visualization") + plt.title("t-SNE visualization on data transformed by DBN") plt.show() # t-SNE on raw data reduced = tsne.fit_transform(Xtrain[:sample_size]) plt.scatter(reduced[:,0], reduced[:,1], s=100, c=Ytrain[:sample_size], alpha=0.5) - plt.title("t-SNE visualization") + plt.title("t-SNE visualization on raw data") plt.show() pca = PCA() reduced = pca.fit_transform(output) plt.scatter(reduced[:,0], reduced[:,1], s=100, c=Ytrain, alpha=0.5) - plt.title("PCA visualization") + plt.title("PCA visualization on data transformed by DBN") plt.show() if __name__ == '__main__': diff --git a/unsupervised_class2/util.py b/unsupervised_class2/util.py index 971a5cc9..3b4f2e4c 100644 --- a/unsupervised_class2/util.py +++ b/unsupervised_class2/util.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import pandas as pd @@ -30,4 +35,5 @@ def getKaggleMNIST(): def init_weights(shape): - return np.random.randn(*shape) / np.sqrt(sum(shape)) \ No newline at end of file + w = np.random.randn(*shape) / np.sqrt(sum(shape)) + return w.astype(np.float32) \ No newline at end of file diff --git a/unsupervised_class2/vanishing.py b/unsupervised_class2/vanishing.py index a696c60d..0f3b9775 100644 --- a/unsupervised_class2/vanishing.py +++ b/unsupervised_class2/vanishing.py @@ -1,23 +1,31 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import theano import theano.tensor as T import matplotlib.pyplot as plt from sklearn.utils import shuffle +from autoencoder import T_shared_zeros_like32, momentum_updates from util import relu, error_rate, getKaggleMNIST, init_weights class HiddenLayer(object): def __init__(self, D, M): W = init_weights((D, M)) - b = np.zeros(M) + b = np.zeros(M, dtype=np.float32) self.W = theano.shared(W) self.b = theano.shared(b) self.params = [self.W, self.b] def forward(self, X): + # we want to use the sigmoid so we can observe + # the vanishing gradient! 
return T.nnet.sigmoid(X.dot(self.W) + self.b) @@ -26,6 +34,10 @@ def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes def fit(self, X, Y, learning_rate=0.01, mu=0.99, epochs=30, batch_sz=100): + # cast to float32 + learning_rate = np.float32(learning_rate) + mu = np.float32(mu) + N, D = X.shape K = len(set(Y)) @@ -38,7 +50,7 @@ def fit(self, X, Y, learning_rate=0.01, mu=0.99, epochs=30, batch_sz=100): # initialize logistic regression layer W = init_weights((mo, K)) - b = np.zeros(K) + b = np.zeros(K, dtype=np.float32) self.W = theano.shared(W) self.b = theano.shared(b) @@ -55,39 +67,28 @@ def fit(self, X, Y, learning_rate=0.01, mu=0.99, epochs=30, batch_sz=100): cost = -T.mean( T.log(pY[T.arange(pY.shape[0]), targets]) ) prediction = self.predict(X_in) - # cost_predict_op = theano.function( - # inputs=[X_in, targets], - # outputs=[cost, prediction], - # ) - - dparams = [theano.shared(p.get_value()*0) for p in self.params] - grads = T.grad(cost, self.params) - - updates = [ - (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) - ] + [ - (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) - ] + + updates = momentum_updates(cost, self.params, mu, learning_rate) train_op = theano.function( inputs=[X_in, targets], outputs=[cost, prediction], updates=updates, ) - n_batches = N / batch_sz + n_batches = N // batch_sz costs = [] lastWs = [W.get_value() for W in self.allWs] W_changes = [] - print "supervised training..." - for i in xrange(epochs): - print "epoch:", i + print("supervised training...") + for i in range(epochs): + print("epoch:", i) X, Y = shuffle(X, Y) - for j in xrange(n_batches): + for j in range(n_batches): Xbatch = X[j*batch_sz:(j*batch_sz + batch_sz)] Ybatch = Y[j*batch_sz:(j*batch_sz + batch_sz)] c, p = train_op(Xbatch, Ybatch) if j % 100 == 0: - print "j / n_batches:", j, "/", n_batches, "cost:", c, "error:", error_rate(p, Ybatch) + print("j / n_batches:", j, "/", n_batches, "cost:", c, "error:", error_rate(p, Ybatch)) costs.append(c) # log changes in all Ws @@ -97,7 +98,7 @@ def fit(self, X, Y, learning_rate=0.01, mu=0.99, epochs=30, batch_sz=100): W_changes = np.array(W_changes) plt.subplot(2,1,1) - for i in xrange(W_changes.shape[1]): + for i in range(W_changes.shape[1]): plt.plot(W_changes[:,i], label='layer %s' % i) plt.legend() # plt.show() diff --git a/unsupervised_class2/visualize_features.py b/unsupervised_class2/visualize_features.py index 77f9c798..18139579 100644 --- a/unsupervised_class2/visualize_features.py +++ b/unsupervised_class2/visualize_features.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import theano import theano.tensor as T @@ -18,40 +23,40 @@ def main(loadfile=None, savefile=None): dbn = DBN.load(loadfile) else: dbn = DBN([1000, 750, 500, 10]) # AutoEncoder is default - dbn = DBN([1000, 750, 500, 10], UnsupervisedModel=RBM) - dbn.fit(Xtrain, pretrain_epochs=15) + # dbn = DBN([1000, 750, 500, 10], UnsupervisedModel=RBM) + dbn.fit(Xtrain, pretrain_epochs=2) if savefile: dbn.save(savefile) # first layer features # initial weight is D x M - # W = dbn.hidden_layers[0].W.eval() - # for i in xrange(dbn.hidden_layers[0].M): - # imgplot = plt.imshow(W[:,i].reshape(28, 28), cmap='gray') - # 
plt.show() - # should_quit = raw_input("Show more? Enter 'n' to quit\n") - # if should_quit == 'n': - # break + W = dbn.hidden_layers[0].W.eval() + for i in range(dbn.hidden_layers[0].M): + imgplot = plt.imshow(W[:,i].reshape(28, 28), cmap='gray') + plt.show() + should_quit = input("Show more? Enter 'n' to quit\n") + if should_quit == 'n': + break # features learned in the last layer - for k in xrange(dbn.hidden_layers[-1].M): + for k in range(dbn.hidden_layers[-1].M): # activate the kth node X = dbn.fit_to_input(k) imgplot = plt.imshow(X.reshape(28, 28), cmap='gray') plt.show() if k < dbn.hidden_layers[-1].M - 1: - should_quit = raw_input("Show more? Enter 'n' to quit\n") + should_quit = input("Show more? Enter 'n' to quit\n") if should_quit == 'n': break if __name__ == '__main__': # to load a saved file - main(loadfile='rbm15.npz') + # main(loadfile='rbm15.npz') # to neither load nor save - # main() + main() # to save a trained unsupervised deep network # main(savefile='rbm15.npz') \ No newline at end of file diff --git a/unsupervised_class2/xwing.py b/unsupervised_class2/xwing.py index 44557ebc..146ebea0 100644 --- a/unsupervised_class2/xwing.py +++ b/unsupervised_class2/xwing.py @@ -1,5 +1,10 @@ # https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python # https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import numpy as np import theano import theano.tensor as T @@ -7,20 +12,20 @@ from sklearn.utils import shuffle from util import relu, error_rate, getKaggleMNIST, init_weights +from autoencoder import momentum_updates # new additions used to compare purity measure using GMM import os import sys sys.path.append(os.path.abspath('..')) from unsupervised_class.kmeans_mnist import purity -# from unsupervised_class.gmm import gmm from sklearn.mixture import GaussianMixture class Layer(object): def __init__(self, m1, m2): W = init_weights((m1, m2)) - bi = np.zeros(m2) - bo = np.zeros(m1) + bi = np.zeros(m2, dtype=np.float32) + bo = np.zeros(m1, dtype=np.float32) self.W = theano.shared(W) self.bi = theano.shared(bi) self.bo = theano.shared(bo) @@ -38,8 +43,12 @@ def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes def fit(self, X, learning_rate=0.5, mu=0.99, epochs=50, batch_sz=100, show_fig=False): + # cast hyperparams + learning_rate = np.float32(learning_rate) + mu = np.float32(mu) + N, D = X.shape - n_batches = N / batch_sz + n_batches = N // batch_sz mi = D self.layers = [] @@ -59,14 +68,7 @@ def fit(self, X, learning_rate=0.5, mu=0.99, epochs=50, batch_sz=100, show_fig=F outputs=cost, ) - dparams = [theano.shared(p.get_value()*0) for p in self.params] - grads = T.grad(cost, self.params) - - updates = [ - (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) - ] + [ - (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) - ] + updates = momentum_updates(cost, self.params, mu, learning_rate) train_op = theano.function( inputs=[X_in], outputs=cost, @@ -74,14 +76,14 @@ def fit(self, X, learning_rate=0.5, mu=0.99, epochs=50, batch_sz=100, show_fig=F ) costs = [] - for i in xrange(epochs): - print "epoch:", i + for i in range(epochs): + print("epoch:", i) X = shuffle(X) - for j in xrange(n_batches): + for j in range(n_batches): batch = X[j*batch_sz:(j*batch_sz + batch_sz)] c = train_op(batch) if j % 100 == 0: - print "j / n_batches:", j, "/", 
n_batches, "cost:", c + print("j / n_batches:", j, "/", n_batches, "cost:", c) costs.append(c) if show_fig: plt.plot(costs) @@ -97,7 +99,7 @@ def forward(self, X): outputs=Z, ) - for i in xrange(len(self.layers)-1, -1, -1): + for i in range(len(self.layers)-1, -1, -1): Z = self.layers[i].forwardT(Z) return Z @@ -112,14 +114,16 @@ def main(): plt.show() # purity measure from unsupervised machine learning pt 1 + # NOTE: this will take a long time (i.e. just leave it overnight) gmm = GaussianMixture(n_components=10) gmm.fit(Xtrain) + print("Finished GMM training") responsibilities_full = gmm.predict_proba(Xtrain) - print "full purity:", purity(Ytrain, responsibilities_full) + print("full purity:", purity(Ytrain, responsibilities_full)) gmm.fit(mapping) responsibilities_reduced = gmm.predict_proba(mapping) - print "reduced purity:", purity(Ytrain, responsibilities_reduced) + print("reduced purity:", purity(Ytrain, responsibilities_reduced)) if __name__ == '__main__': From faadf24edb03e5979f7315a86d7ef891dbcda652 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Tue, 26 Dec 2017 13:34:00 -0500 Subject: [PATCH 002/329] relative import, faster gmm --- unsupervised_class/gmm.py | 19 ++++++++++--------- unsupervised_class/kmeans_mnist.py | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/unsupervised_class/gmm.py b/unsupervised_class/gmm.py index cda1bb32..d96f74b7 100644 --- a/unsupervised_class/gmm.py +++ b/unsupervised_class/gmm.py @@ -29,18 +29,19 @@ def gmm(X, K, max_iter=20, smoothing=1e-2): weighted_pdfs = np.zeros((N, K)) # we'll use these to store the PDF value of sample n and Gaussian k for i in range(max_iter): # step 1: determine assignments / resposibilities - for k in range(K): - for n in range(N): - weighted_pdfs[n,k] = pi[k]*multivariate_normal.pdf(X[n], M[k], C[k]) + # this is the slow way + # for k in range(K): + # for n in range(N): + # weighted_pdfs[n,k] = pi[k]*multivariate_normal.pdf(X[n], M[k], C[k]) - for k in range(K): - for n in range(N): - R[n,k] = weighted_pdfs[n,k] / weighted_pdfs[n,:].sum() + # for k in range(K): + # for n in range(N): + # R[n,k] = weighted_pdfs[n,k] / weighted_pdfs[n,:].sum() # a faster way to do step 1: "vectorization" - # for k in range(K): - # weighted_pdfs[:,k] = pi[k]*multivariate_normal.pdf(X, M[k], C[k]) - # R = weighted_pdfs / weighted_pdfs.sum(axis=1, keepdims=True) + for k in range(K): + weighted_pdfs[:,k] = pi[k]*multivariate_normal.pdf(X, M[k], C[k]) + R = weighted_pdfs / weighted_pdfs.sum(axis=1, keepdims=True) # step 2: recalculate params for k in range(K): diff --git a/unsupervised_class/kmeans_mnist.py b/unsupervised_class/kmeans_mnist.py index 4d7bcf9d..3fa0293e 100644 --- a/unsupervised_class/kmeans_mnist.py +++ b/unsupervised_class/kmeans_mnist.py @@ -16,7 +16,7 @@ import numpy as np import pandas as pd import matplotlib.pyplot as plt -from kmeans import plot_k_means, get_simple_data +from .kmeans import plot_k_means, get_simple_data from datetime import datetime def get_data(limit=None): From ce6aba1f43c8bd3f5f3016263a4e98fb74caf8c5 Mon Sep 17 00:00:00 2001 From: Mac User Date: Wed, 27 Dec 2017 01:45:39 -0500 Subject: [PATCH 003/329] minor updates --- airline/ann.py | 2 +- airline/rnn.py | 2 +- ann_class2/extra_reading.txt | 6 +++++- bayesian_ml/3/run.py | 2 +- cnn_class/custom_blur.py | 10 ++++++++++ logistic_regression_class/logistic3.py | 10 ++++++---- nlp_class2/glove.py | 10 +++++----- nlp_class2/rntn_tensorflow.py | 2 +- nlp_class2/util.py | 17 ++++++++++++----- rl2/atari/dqn_tf.py | 2 +- 
rl2/atari/dqn_tf_alt.py | 8 ++++---- unsupervised_class/kmeans.py | 2 +- unsupervised_class/kmeans_visualize.py | 2 +- unsupervised_class2/autoencoder_tf.py | 2 +- 14 files changed, 50 insertions(+), 27 deletions(-) diff --git a/airline/ann.py b/airline/ann.py index c4135688..a5cad526 100644 --- a/airline/ann.py +++ b/airline/ann.py @@ -38,7 +38,7 @@ class ANN(object): def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes - def fit(self, X, Y, activation=T.tanh, learning_rate=10e-4, mu=0.5, reg=0, epochs=5000, batch_sz=None, print_period=100, show_fig=True): + def fit(self, X, Y, activation=T.tanh, learning_rate=1e-3, mu=0.5, reg=0, epochs=5000, batch_sz=None, print_period=100, show_fig=True): X = X.astype(np.float32) Y = Y.astype(np.float32) diff --git a/airline/rnn.py b/airline/rnn.py index 3335a4cd..ff11b2d1 100644 --- a/airline/rnn.py +++ b/airline/rnn.py @@ -29,7 +29,7 @@ class RNN(object): def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes - def fit(self, X, Y, activation=T.tanh, learning_rate=10e-2, mu=0.5, reg=0, epochs=2000, show_fig=False): + def fit(self, X, Y, activation=T.tanh, learning_rate=1e-1, mu=0.5, reg=0, epochs=2000, show_fig=False): N, t, D = X.shape self.hidden_layers = [] diff --git a/ann_class2/extra_reading.txt b/ann_class2/extra_reading.txt index b1cb2cfe..356dd690 100644 --- a/ann_class2/extra_reading.txt +++ b/ann_class2/extra_reading.txt @@ -18,4 +18,8 @@ Xavier (Glorot) Normal Initializer http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf He Normal Initializer -http://arxiv.org/abs/1502.01852 \ No newline at end of file +http://arxiv.org/abs/1502.01852 + +For understanding Nesterov Momentum: +Advances in optimizing Recurrent Networks by Yoshua Bengio, Section 3.5 +http://arxiv.org/pdf/1212.0901v2.pdf \ No newline at end of file diff --git a/bayesian_ml/3/run.py b/bayesian_ml/3/run.py index 5325c15e..72d9d8ee 100644 --- a/bayesian_ml/3/run.py +++ b/bayesian_ml/3/run.py @@ -66,7 +66,7 @@ def objective(X, Y, C, mu, a, b, e, f, a0, b0, e0, f0): # e3 = gamma_dist.entropy(e, scale=1.0/f) # e4 = -e_ln_q_gamma(e, f) # print "e3:", e3, "e4:", e4 - # assert(np.abs(e3 - e4) < 10e-8) + # assert(np.abs(e3 - e4) < 1e-8) total += gamma_dist.entropy(e, scale=1.0/f) # total -= e_ln_q_gamma(e, f) # print "total after lnq(lambda):", total diff --git a/cnn_class/custom_blur.py b/cnn_class/custom_blur.py index 9c93c91c..e7c3749d 100644 --- a/cnn_class/custom_blur.py +++ b/cnn_class/custom_blur.py @@ -91,3 +91,13 @@ def convolve2d(X, W): print(out.shape) # after convolution, the output signal is N1 + N2 - 1 +# try it in color +out = np.zeros(img.shape) +W /= W.sum() +for i in range(3): + out[:,:,i] = convolve2d(img[:,:,i], W) +plt.imshow(out) +plt.show() + + + diff --git a/logistic_regression_class/logistic3.py b/logistic_regression_class/logistic3.py index a021ef51..930d9d26 100644 --- a/logistic_regression_class/logistic3.py +++ b/logistic_regression_class/logistic3.py @@ -16,17 +16,19 @@ N = 100 D = 2 +N_per_class = N//2 + X = np.random.randn(N,D) # center the first 50 points at (-2,-2) -X[:50,:] = X[:50,:] - 2*np.ones((50,D)) +X[:N_per_class,:] = X[:N_per_class,:] - 2*np.ones((N_per_class,D)) # center the last 50 points at (2, 2) -X[50:,:] = X[50:,:] + 2*np.ones((50,D)) +X[N_per_class:,:] = X[N_per_class:,:] + 2*np.ones((N_per_class,D)) -# labels: first 50 are 0, last 50 are 1 -T = np.array([0]*50 + [1]*50) +# labels: first N_per_class are 0, last N_per_class are 1 +T = np.array([0]*N_per_class + 
[1]*N_per_class) # add a column of ones # ones = np.array([[1]*N]).T # old diff --git a/nlp_class2/glove.py b/nlp_class2/glove.py index f48e2a94..5954ceba 100644 --- a/nlp_class2/glove.py +++ b/nlp_class2/glove.py @@ -223,7 +223,7 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, for i in xrange(V): # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(U[j], U[j]) for j in xrange(V)), axis=0) matrix = reg*np.eye(D) + (fX[i,:]*U.T).dot(U) - # assert(np.abs(matrix - matrix2).sum() < 10e-5) + # assert(np.abs(matrix - matrix2).sum() < 1e-5) vector = (fX[i,:]*(logX[i,:] - b[i] - c - mu)).dot(U) W[i] = np.linalg.solve(matrix, vector) # print "fast way took:", (datetime.now() - t0) @@ -238,8 +238,8 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # vector2 += fX[i,j]*(logX[i,j] - b[i] - c[j])*U[j] # print "slow way took:", (datetime.now() - t0) - # assert(np.abs(matrix - matrix2).sum() < 10e-5) - # assert(np.abs(vector - vector2).sum() < 10e-5) + # assert(np.abs(matrix - matrix2).sum() < 1e-5) + # assert(np.abs(vector - vector2).sum() < 1e-5) # W[i] = np.linalg.solve(matrix, vector) # print "updated W" @@ -257,7 +257,7 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, for j in xrange(V): # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(W[i], W[i]) for i in xrange(V)), axis=0) matrix = reg*np.eye(D) + (fX[:,j]*W.T).dot(W) - # assert(np.abs(matrix - matrix2).sum() < 10e-8) + # assert(np.abs(matrix - matrix2).sum() < 1e-8) vector = (fX[:,j]*(logX[:,j] - b - c[j] - mu)).dot(W) # matrix = reg*np.eye(D) # vector = 0 @@ -323,7 +323,7 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): model.fit( sentences, cc_matrix=cc_matrix, - learning_rate=3*10e-5, + learning_rate=3e-4, reg=0.1, epochs=10, gd=True, diff --git a/nlp_class2/rntn_tensorflow.py b/nlp_class2/rntn_tensorflow.py index b1571445..7396a23c 100644 --- a/nlp_class2/rntn_tensorflow.py +++ b/nlp_class2/rntn_tensorflow.py @@ -68,7 +68,7 @@ def __init__(self, V, D, K, activation): self.bo = tf.Variable(bo.astype(np.float32)) self.params = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.Wo] - def fit(self, trees, lr=10e-3, mu=0.9, reg=10e-2, epochs=5): + def fit(self, trees, lr=1e-2, mu=0.9, reg=1e-1, epochs=5): train_ops = [] costs = [] predictions = [] diff --git a/nlp_class2/util.py b/nlp_class2/util.py index 25ad7f6f..b6d94586 100644 --- a/nlp_class2/util.py +++ b/nlp_class2/util.py @@ -1,6 +1,13 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import numpy as np def init_weight(Mi, Mo): @@ -21,15 +28,15 @@ def dist2(a, b): for dist, name in [(dist1, 'Euclidean'), (dist2, 'cosine')]: min_dist = float('inf') best_word = '' - for word, idx in word2idx.iteritems(): + for word, idx in iteritems(word2idx): if word not in (w1, w2, w3): v1 = We[idx] d = dist(v0, v1) if d < min_dist: min_dist = d best_word = word - print "closest match by", name, "distance:", best_word - print w1, "-", w2, "=", best_word, "-", w3 + print("closest match by", name, "distance:", best_word) + print(w1, "-", w2, "=", best_word, "-", w3) class Tree: @@ -43,9 +50,9 @@ def __init__(self, word, label): def display_tree(t, 
lvl=0): prefix = ''.join(['>']*lvl) if t.word is not None: - print "%s%s %s" % (prefix, t.label, t.word) + print("%s%s %s" % (prefix, t.label, t.word)) else: - print "%s%s -" % (prefix, t.label) + print("%s%s -" % (prefix, t.label)) # if t.left is None or t.right is None: # raise Exception("Tree node has no word but left and right child are None") if t.left: diff --git a/rl2/atari/dqn_tf.py b/rl2/atari/dqn_tf.py index a2c7e06c..64acbfb6 100644 --- a/rl2/atari/dqn_tf.py +++ b/rl2/atari/dqn_tf.py @@ -100,7 +100,7 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope): cost = tf.reduce_mean(tf.square(self.G - selected_action_values)) # self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost) # self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost) - # self.train_op = tf.train.RMSPropOptimizer(2.5e-4, decay=0.99, epsilon=10e-3).minimize(cost) + # self.train_op = tf.train.RMSPropOptimizer(2.5e-4, decay=0.99, epsilon=1e-3).minimize(cost) self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost) # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost) # self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost) diff --git a/rl2/atari/dqn_tf_alt.py b/rl2/atari/dqn_tf_alt.py index a94e59d0..aaf3e4ef 100644 --- a/rl2/atari/dqn_tf_alt.py +++ b/rl2/atari/dqn_tf_alt.py @@ -165,11 +165,11 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): cost = tf.reduce_mean(tf.square(self.G - selected_action_values)) self.cost = cost - # self.train_op = tf.train.AdamOptimizer(10e-3).minimize(cost) - # self.train_op = tf.train.AdagradOptimizer(10e-3).minimize(cost) + # self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost) + # self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost) self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost) - # self.train_op = tf.train.MomentumOptimizer(10e-4, momentum=0.9).minimize(cost) - # self.train_op = tf.train.GradientDescentOptimizer(10e-5).minimize(cost) + # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost) + # self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost) def set_session(self, session): self.session = session diff --git a/unsupervised_class/kmeans.py b/unsupervised_class/kmeans.py index 92bef6b1..16d75d31 100644 --- a/unsupervised_class/kmeans.py +++ b/unsupervised_class/kmeans.py @@ -50,7 +50,7 @@ def plot_k_means(X, K, max_iter=20, beta=1.0, show_plots=True): exponents[n,k] = np.exp(-beta*d(M[k], X[n])) R = exponents / exponents.sum(axis=1, keepdims=True) - # assert(np.abs(R - R2).sum() < 10e-10) + # assert(np.abs(R - R2).sum() < 1e-10) # step 2: recalculate means for k in range(K): diff --git a/unsupervised_class/kmeans_visualize.py b/unsupervised_class/kmeans_visualize.py index d6fea4d7..d10b8503 100644 --- a/unsupervised_class/kmeans_visualize.py +++ b/unsupervised_class/kmeans_visualize.py @@ -57,7 +57,7 @@ def plot_k_means(X, K, max_iter=20, beta=1.0): costs[i] = cost(X, R, M) if i > 0: - if np.abs(costs[i] - costs[i-1]) < 10e-5: + if np.abs(costs[i] - costs[i-1]) < 1e-5: break plt.show() diff --git a/unsupervised_class2/autoencoder_tf.py b/unsupervised_class2/autoencoder_tf.py index 40674269..08c3f156 100644 --- a/unsupervised_class2/autoencoder_tf.py +++ b/unsupervised_class2/autoencoder_tf.py @@ -43,7 +43,7 @@ def build(self, D, M): ) self.train_op = tf.train.AdamOptimizer(1e-1).minimize(self.cost) - # self.train_op = tf.train.MomentumOptimizer(10e-4, 
momentum=0.9).minimize(self.cost) + # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(self.cost) def fit(self, X, epochs=1, batch_sz=100, show_fig=False): N, D = X.shape From 244d1b6cdfea719cc6493fd6607747f7644e4384 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Thu, 28 Dec 2017 23:07:40 -0500 Subject: [PATCH 004/329] python 3 --- nlp_class2/glove.py | 100 +++++++++++---------- nlp_class2/ner_baseline.py | 26 +++--- nlp_class2/ner_rnn.py | 12 ++- nlp_class2/pos_baseline.py | 61 +++++++++---- nlp_class2/pos_hmm.py | 16 ++-- nlp_class2/pos_rnn.py | 29 +++++-- nlp_class2/recursive_tensorflow.py | 29 +++++-- nlp_class2/recursive_theano.py | 87 ++++++++++++------- nlp_class2/rntn_tensorflow.py | 24 ++++-- nlp_class2/rntn_theano.py | 134 ++++++++++++++++++++++------- nlp_class2/tfidf_tsne.py | 19 ++-- nlp_class2/util.py | 17 +++- nlp_class2/visualize_countries.py | 8 +- nlp_class2/word2vec.py | 74 +++++++++++----- 14 files changed, 434 insertions(+), 202 deletions(-) diff --git a/nlp_class2/glove.py b/nlp_class2/glove.py index 5954ceba..0fb996bc 100644 --- a/nlp_class2/glove.py +++ b/nlp_class2/glove.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import os import json import numpy as np @@ -41,14 +47,14 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, if not os.path.exists(cc_matrix): X = np.zeros((V, V)) N = len(sentences) - print "number of sentences to process:", N + print("number of sentences to process:", N) it = 0 for sentence in sentences: it += 1 if it % 10000 == 0: - print "processed", it, "/", N + print("processed", it, "/", N) n = len(sentence) - for i in xrange(n): + for i in range(n): # i is not the word index!!! # j is not the word index!!! 
# i just points to which element of the sequence (sentence) we're looking at @@ -72,14 +78,14 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, X[1,wi] += points # left side - for j in xrange(start, i): + for j in range(start, i): wj = sentence[j] points = 1.0 / (i - j) # this is +ve X[wi,wj] += points X[wj,wi] += points # right side - for j in xrange(i + 1, end): + for j in range(i + 1, end): wj = sentence[j] points = 1.0 / (j - i) # this is +ve X[wi,wj] += points @@ -90,21 +96,21 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, else: X = np.load(cc_matrix) - print "max in X:", X.max() + print("max in X:", X.max()) # weighting fX = np.zeros((V, V)) fX[X < xmax] = (X[X < xmax] / float(xmax)) ** alpha fX[X >= xmax] = 1 - print "max in f(X):", fX.max() + print("max in f(X):", fX.max()) # target logX = np.log(X + 1) - print "max in log(X):", logX.max() + print("max in log(X):", logX.max()) - print "time to build co-occurrence matrix:", (datetime.now() - t0) + print("time to build co-occurrence matrix:", (datetime.now() - t0)) # initialize weights W = np.random.randn(V, D) / np.sqrt(V + D) @@ -160,11 +166,11 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, costs = [] sentence_indexes = range(len(sentences)) - for epoch in xrange(epochs): + for epoch in range(epochs): delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX cost = ( fX * delta * delta ).sum() costs.append(cost) - print "epoch:", epoch, "cost:", cost + print("epoch:", epoch, "cost:", cost) if gd: # gradient descent method @@ -183,32 +189,32 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, else: # update W oldW = W.copy() - for i in xrange(V): - # for j in xrange(V): + for i in range(V): + # for j in range(V): # W[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*U[j] W[i] -= learning_rate*(fX[i,:]*delta[i,:]).dot(U) W -= learning_rate*reg*W # print "updated W" # update b - for i in xrange(V): - # for j in xrange(V): + for i in range(V): + # for j in range(V): # b[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j]) b[i] -= learning_rate*fX[i,:].dot(delta[i,:]) b -= learning_rate*reg*b # print "updated b" # update U - for j in xrange(V): - # for i in xrange(V): + for j in range(V): + # for i in range(V): # U[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*W[i] U[j] -= learning_rate*(fX[:,j]*delta[:,j]).dot(oldW) U -= learning_rate*reg*U # print "updated U" # update c - for j in xrange(V): - # for i in xrange(V): + for j in range(V): + # for i in range(V): # c[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j]) c[j] -= learning_rate*fX[:,j].dot(delta[:,j]) c -= learning_rate*reg*c @@ -220,8 +226,8 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # update W # fast way # t0 = datetime.now() - for i in xrange(V): - # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(U[j], U[j]) for j in xrange(V)), axis=0) + for i in range(V): + # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(U[j], U[j]) for j in range(V)), axis=0) matrix = reg*np.eye(D) + (fX[i,:]*U.T).dot(U) # assert(np.abs(matrix - matrix2).sum() < 1e-5) vector = (fX[i,:]*(logX[i,:] - b[i] - c - mu)).dot(U) @@ -230,10 +236,10 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # slow way # t0 = datetime.now() - # for i in xrange(V): + # for i in range(V): # matrix2 = 
reg*np.eye(D) # vector2 = 0 - # for j in xrange(V): + # for j in range(V): # matrix2 += fX[i,j]*np.outer(U[j], U[j]) # vector2 += fX[i,j]*(logX[i,j] - b[i] - c[j])*U[j] # print "slow way took:", (datetime.now() - t0) @@ -244,34 +250,34 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # print "updated W" # update b - for i in xrange(V): + for i in range(V): denominator = fX[i,:].sum() # assert(denominator > 0) numerator = fX[i,:].dot(logX[i,:] - W[i].dot(U.T) - c - mu) - # for j in xrange(V): + # for j in range(V): # numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - c[j]) b[i] = numerator / denominator / (1 + reg) # print "updated b" # update U - for j in xrange(V): - # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(W[i], W[i]) for i in xrange(V)), axis=0) + for j in range(V): + # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(W[i], W[i]) for i in range(V)), axis=0) matrix = reg*np.eye(D) + (fX[:,j]*W.T).dot(W) # assert(np.abs(matrix - matrix2).sum() < 1e-8) vector = (fX[:,j]*(logX[:,j] - b - c[j] - mu)).dot(W) # matrix = reg*np.eye(D) # vector = 0 - # for i in xrange(V): + # for i in range(V): # matrix += fX[i,j]*np.outer(W[i], W[i]) # vector += fX[i,j]*(logX[i,j] - b[i] - c[j])*W[i] U[j] = np.linalg.solve(matrix, vector) # print "updated U" # update c - for j in xrange(V): + for j in range(V): denominator = fX[:,j].sum() numerator = fX[:,j].dot(logX[:,j] - W.dot(U[j]) - b - mu) - # for i in xrange(V): + # for i in range(V): # numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - b[i]) c[j] = numerator / denominator / (1 + reg) # print "updated c" @@ -319,28 +325,28 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): V = len(word2idx) model = Glove(100, V, 10) - # model.fit(sentences, cc_matrix=cc_matrix, epochs=20) # ALS - model.fit( - sentences, - cc_matrix=cc_matrix, - learning_rate=3e-4, - reg=0.1, - epochs=10, - gd=True, - use_theano=False, - use_tensorflow=True, - ) + model.fit(sentences, cc_matrix=cc_matrix, epochs=20) # ALS + # model.fit( + # sentences, + # cc_matrix=cc_matrix, + # learning_rate=3e-4, + # reg=0.1, + # epochs=10, + # gd=True, + # use_theano=False, + # use_tensorflow=True, + # ) model.save(we_file) if __name__ == '__main__': - # we = 'glove_model_50.npz' - # w2i = 'glove_word2idx_50.json' - we = 'glove_model_brown.npz' - w2i = 'glove_word2idx_brown.json' - main(we, w2i, use_brown=True) + we = 'glove_model_50.npz' + w2i = 'glove_word2idx_50.json' + # we = 'glove_model_brown.npz' + # w2i = 'glove_word2idx_brown.json' + main(we, w2i, use_brown=False) for concat in (True, False): - print "** concat:", concat + print("** concat:", concat) find_analogies('king', 'man', 'woman', concat, we, w2i) find_analogies('france', 'paris', 'london', concat, we, w2i) find_analogies('france', 'paris', 'rome', concat, we, w2i) diff --git a/nlp_class2/ner_baseline.py b/nlp_class2/ner_baseline.py index 219b861a..572332c8 100644 --- a/nlp_class2/ner_baseline.py +++ b/nlp_class2/ner_baseline.py @@ -5,6 +5,12 @@ # data from https://github.com/aritter/twitter_nlp/blob/master/data/annotated/ner.txt # data2 from http://schwa.org/projects/resources/wiki/Wikiner#WikiGold +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import numpy as np from sklearn.utils import shuffle from pos_baseline import LogisticRegression @@ -43,14 +49,14 @@ def get_data(split_sequences=False): Xtrain = currentX Ytrain = currentY - print "number of samples:", 
len(Xtrain) + print("number of samples:", len(Xtrain)) Xtrain, Ytrain = shuffle(Xtrain, Ytrain) Ntest = int(0.3*len(Xtrain)) Xtest = Xtrain[:Ntest] Ytest = Ytrain[:Ntest] Xtrain = Xtrain[Ntest:] Ytrain = Ytrain[Ntest:] - print "number of classes:", len(tag2idx) + print("number of classes:", len(tag2idx)) return Xtrain, Ytrain, Xtest, Ytest, word2idx, tag2idx @@ -88,14 +94,14 @@ def get_data(split_sequences=False): # Xtrain = np.concatenate(Xtrain) # Ytrain = np.concatenate(Ytrain) -# print "number of samples:", len(Xtrain) +# print("number of samples:", len(Xtrain)) # Xtrain, Ytrain = shuffle(Xtrain, Ytrain) # Ntest = int(0.3*len(Xtrain)) # Xtest = Xtrain[:Ntest] # Ytest = Ytrain[:Ntest] # Xtrain = Xtrain[Ntest:] # Ytrain = Ytrain[Ntest:] -# print "number of classes:", len(tag2idx) +# print("number of classes:", len(tag2idx)) # return Xtrain, Ytrain, Xtest, Ytest, word2idx, tag2idx @@ -103,17 +109,17 @@ def main(): Xtrain, Ytrain, Xtest, Ytest, word2idx, tag2idx = get_data() V = len(word2idx) - print "vocabulary size:", V + print("vocabulary size:", V) K = len(tag2idx) # train and score model = LogisticRegression() model.fit(Xtrain, Ytrain, V=V, K=K, epochs=5) - print "training complete" - print "train score:", model.score(Xtrain, Ytrain) - print "train f1 score:", model.f1_score(Xtrain, Ytrain) - print "test score:", model.score(Xtest, Ytest) - print "test f1 score:", model.f1_score(Xtest, Ytest) + print("training complete") + print("train score:", model.score(Xtrain, Ytrain)) + print("train f1 score:", model.f1_score(Xtrain, Ytrain)) + print("test score:", model.score(Xtest, Ytest)) + print("test f1 score:", model.f1_score(Xtest, Ytest)) if __name__ == '__main__': main() diff --git a/nlp_class2/ner_rnn.py b/nlp_class2/ner_rnn.py index 80113f53..026914ef 100644 --- a/nlp_class2/ner_rnn.py +++ b/nlp_class2/ner_rnn.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + from ner_baseline import get_data from pos_rnn import RNN @@ -10,8 +16,10 @@ def main(): K = len(tag2idx) rnn = RNN(10, [10], V, K) rnn.fit(Xtrain, Ytrain, epochs=70) - print "train f1 score:", rnn.f1_score(Xtrain, Ytrain) - print "test f1 score:", rnn.f1_score(Xtest, Ytest) + print("train score:", rnn.score(Xtrain, Ytrain)) + print("test score:", rnn.score(Xtest, Ytest)) + print("train f1 score:", rnn.f1_score(Xtrain, Ytrain)) + print("test f1 score:", rnn.f1_score(Xtest, Ytest)) if __name__ == '__main__': diff --git a/nlp_class2/pos_baseline.py b/nlp_class2/pos_baseline.py index c1e8e4d4..c33bd039 100644 --- a/nlp_class2/pos_baseline.py +++ b/nlp_class2/pos_baseline.py @@ -2,10 +2,16 @@ # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python -# You can get the data from this URL: http://www.cnts.ua.ac.be/conll2000/chunking/ +# You can get the data from this URL: https://www.clips.uantwerpen.be/conll2000/chunking/ # If above URL does not work, try this: # https://drive.google.com/file/d/0BxGV7C-8DTe5QmF2MTFwN3JjWGc/view?usp=sharing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import os, 
sys import numpy as np import theano import theano.tensor as T @@ -60,18 +66,23 @@ def fit(self, X, Y, V=None, K=None, D=50, lr=1e-1, mu=0.99, batch_sz=100, epochs ) costs = [] - n_batches = N / batch_sz - for i in xrange(epochs): + n_batches = N // batch_sz + for i in range(epochs): X, Y = shuffle(X, Y) - print "epoch:", i - for j in xrange(n_batches): + print("epoch:", i) + for j in range(n_batches): Xbatch = X[j*batch_sz:(j*batch_sz + batch_sz)] Ybatch = Y[j*batch_sz:(j*batch_sz + batch_sz)] c, p = train_op(Xbatch, Ybatch) costs.append(c) if j % 200 == 0: - print "i:", i, "j:", j, "n_batches:", n_batches, "cost:", c, "error:", np.mean(p != Ybatch) + print( + "i:", i, "j:", j, + "n_batches:", n_batches, + "cost:", c, + "error:", np.mean(p != Ybatch) + ) plt.plot(costs) plt.show() @@ -85,6 +96,20 @@ def f1_score(self, X, Y): def get_data(split_sequences=False): + if not os.path.exists('chunking'): + print("Please create a folder in your local directory called 'chunking'") + print("train.txt and test.txt should be stored in there.") + print("Please check the comments to get the download link.") + exit() + elif not os.path.exists('chunking/train.txt'): + print("train.txt is not in chunking/train.txt") + print("Please check the comments to get the download link.") + exit() + elif not os.path.exists('chunking/test.txt'): + print("test.txt is not in chunking/test.txt") + print("Please check the comments to get the download link.") + exit() + word2idx = {} tag2idx = {} word_idx = 0 @@ -153,7 +178,7 @@ def main(): # convert Xtrain to indicator matrix N = len(Xtrain) V = len(word2idx) + 1 - print "vocabulary size:", V + print("vocabulary size:", V) # Xtrain_indicator = np.zeros((N, V)) # Xtrain_indicator[np.arange(N), Xtrain] = 1 @@ -162,20 +187,20 @@ def main(): # without indicator dt.fit(Xtrain.reshape(N, 1), Ytrain) - print "dt train score:", dt.score(Xtrain.reshape(N, 1), Ytrain) + print("dt train score:", dt.score(Xtrain.reshape(N, 1), Ytrain)) p = dt.predict(Xtrain.reshape(N, 1)) - print "dt train f1:", f1_score(Ytrain, p, average=None).mean() + print("dt train f1:", f1_score(Ytrain, p, average=None).mean()) # with indicator -- too slow!! # dt.fit(Xtrain_indicator, Ytrain) - # print "dt score:", dt.score(Xtrain_indicator, Ytrain) + # print("dt score:", dt.score(Xtrain_indicator, Ytrain)) # train and score model = LogisticRegression() model.fit(Xtrain, Ytrain, V=V) - print "training complete" - print "lr train score:", model.score(Xtrain, Ytrain) - print "lr train f1:", model.f1_score(Xtrain, Ytrain) + print("training complete") + print("lr train score:", model.score(Xtrain, Ytrain)) + print("lr train f1:", model.f1_score(Xtrain, Ytrain)) Ntest = len(Xtest) @@ -186,14 +211,14 @@ def main(): # Xtest_indicator[np.arange(Ntest), Xtest] = 1 # decision tree test score - print "dt test score:", dt.score(Xtest.reshape(Ntest, 1), Ytest) + print("dt test score:", dt.score(Xtest.reshape(Ntest, 1), Ytest)) p = dt.predict(Xtest.reshape(Ntest, 1)) - print "dt test f1:", f1_score(Ytest, p, average=None).mean() - # print "dt test score:", dt.score(Xtest_indicator, Ytest) # too slow! + print("dt test f1:", f1_score(Ytest, p, average=None).mean()) + # print("dt test score:", dt.score(Xtest_indicator, Ytest)) # too slow! # logistic test score -- too slow!! 
- print "lr test score:", model.score(Xtest, Ytest) - print "lr test f1:", model.f1_score(Xtest, Ytest) + print("lr test score:", model.score(Xtest, Ytest)) + print("lr test f1:", model.f1_score(Xtest, Ytest)) if __name__ == '__main__': main() diff --git a/nlp_class2/pos_hmm.py b/nlp_class2/pos_hmm.py index ebc17be2..e3065cd2 100644 --- a/nlp_class2/pos_hmm.py +++ b/nlp_class2/pos_hmm.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import numpy as np import matplotlib.pyplot as plt @@ -47,7 +53,7 @@ def main(smoothing=1e-1): pi = np.zeros(M) for y in Ytrain: pi[y[0]] += 1 - for i in xrange(len(y)-1): + for i in range(len(y)-1): A[y[i], y[i+1]] += 1 # turn it into a probability matrix A /= A.sum(axis=1, keepdims=True) @@ -77,10 +83,10 @@ def main(smoothing=1e-1): Ptest.append(p) # print results - print "train accuracy:", accuracy(Ytrain, Ptrain) - print "test accuracy:", accuracy(Ytest, Ptest) - print "train f1:", total_f1_score(Ytrain, Ptrain) - print "test f1:", total_f1_score(Ytest, Ptest) + print("train accuracy:", accuracy(Ytrain, Ptrain)) + print("test accuracy:", accuracy(Ytest, Ptest)) + print("train f1:", total_f1_score(Ytrain, Ptrain)) + print("test f1:", total_f1_score(Ytest, Ptest)) if __name__ == '__main__': main() diff --git a/nlp_class2/pos_rnn.py b/nlp_class2/pos_rnn.py index 1f69b31b..714297f3 100644 --- a/nlp_class2/pos_rnn.py +++ b/nlp_class2/pos_rnn.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import numpy as np import matplotlib.pyplot as plt import theano @@ -59,7 +65,7 @@ def fit(self, X, Y, learning_rate=1e-4, mu=0.99, epochs=30, show_fig=True, activ outputs=py_x, ) testout = testf(X[0]) - print "py_x.shape:", testout.shape + print("py_x.shape:", testout.shape) prediction = T.argmax(py_x, axis=1) @@ -97,7 +103,7 @@ def fit(self, X, Y, learning_rate=1e-4, mu=0.99, epochs=30, show_fig=True, activ costs = [] sequence_indexes = range(N) n_total = sum(len(y) for y in Y) - for i in xrange(epochs): + for i in range(epochs): t0 = datetime.now() sequence_indexes = shuffle(sequence_indexes) n_correct = 0 @@ -109,9 +115,16 @@ def fit(self, X, Y, learning_rate=1e-4, mu=0.99, epochs=30, show_fig=True, activ n_correct += np.sum(p == Y[j]) it += 1 if it % 200 == 0: - sys.stdout.write("j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % (it, N, float(n_correct)/n_total, cost)) + sys.stdout.write( + "j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % + (it, N, float(n_correct)/n_total, cost) + ) sys.stdout.flush() - print "i:", i, "cost:", cost, "correct rate:", (float(n_correct)/n_total), "time for epoch:", (datetime.now() - t0) + print( + "i:", i, "cost:", cost, + "correct rate:", (float(n_correct)/n_total), + "time for epoch:", (datetime.now() - t0) + ) costs.append(cost) if show_fig: @@ -146,10 +159,10 @@ def main(): K = len(set(flatten(Ytrain)) | set(flatten(Ytest))) rnn = RNN(10, [10], V, K) rnn.fit(Xtrain, Ytrain) - print "train 
score:", rnn.score(Xtrain, Ytrain) - print "test score:", rnn.score(Xtest, Ytest) - print "train f1:", rnn.f1_score(Xtrain, Ytrain) - print "test f1:", rnn.f1_score(Xtest, Ytest) + print("train score:", rnn.score(Xtrain, Ytrain)) + print("test score:", rnn.score(Xtest, Ytest)) + print("train f1:", rnn.f1_score(Xtrain, Ytrain)) + print("test f1:", rnn.f1_score(Xtest, Ytest)) if __name__ == '__main__': diff --git a/nlp_class2/recursive_tensorflow.py b/nlp_class2/recursive_tensorflow.py index df5beba3..4c43e3df 100644 --- a/nlp_class2/recursive_tensorflow.py +++ b/nlp_class2/recursive_tensorflow.py @@ -2,6 +2,11 @@ # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python # data is from: http://nlp.stanford.edu/sentiment/ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + import sys import tensorflow as tf @@ -56,7 +61,7 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): all_labels = [] i = 0 N = len(trees) - print "Compiling ops" + print("Compiling ops") for t in trees: i += 1 sys.stdout.write("%d/%d\r" % (i, N)) @@ -86,7 +91,7 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): with tf.Session() as session: session.run(init) - for i in xrange(epochs): + for i in range(epochs): t0 = datetime.now() train_ops, costs, predictions, all_labels = shuffle(train_ops, costs, predictions, all_labels) @@ -107,7 +112,10 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): sys.stdout.write("j: %d, N: %d, c: %f\r" % (j, N, c)) sys.stdout.flush() - print "epoch:", i, "cost:", epoch_cost, "elapsed time:", (datetime.now() - t0) + print( + "epoch:", i, "cost:", epoch_cost, + "elapsed time:", (datetime.now() - t0) + ) per_epoch_costs.append(epoch_cost) correct_rates.append(n_correct / float(n_total)) @@ -127,7 +135,12 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): plt.show() def get_cost(self, logits, labels, reg): - cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)) + cost = tf.reduce_mean( + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, + labels=labels + ) + ) rcost = sum(tf.nn.l2_loss(p) for p in self.params) cost += reg*rcost return cost @@ -159,7 +172,7 @@ def get_output(self, tree): # except Exception as e: # display_tree(tree) # raise e - return tf.concat(0, logits) + return tf.concat(logits, 0) def score(self, trees): if trees is None: @@ -173,7 +186,7 @@ def score(self, trees): i = 0 N = len(trees) - print "Compiling ops" + print("Compiling ops") for t in trees: i += 1 @@ -211,8 +224,8 @@ def main(): model = TNN(V, D, K, tf.nn.relu) model.fit(train) - print "train accuracy:", model.score(None) - print "test accuracy:", model.score(test) + print("train accuracy:", model.score(None)) + print("test accuracy:", model.score(test)) if __name__ == '__main__': diff --git a/nlp_class2/recursive_theano.py b/nlp_class2/recursive_theano.py index 2aa27a14..2241999b 100644 --- a/nlp_class2/recursive_theano.py +++ b/nlp_class2/recursive_theano.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future 
+ + import sys import numpy as np import matplotlib.pyplot as plt @@ -12,6 +18,19 @@ from datetime import datetime +def adagrad(cost, params, lr, eps=1e-10): + grads = T.grad(cost, params) + caches = [theano.shared(np.ones_like(p.get_value())) for p in params] + new_caches = [c + g*g for c, g in zip(caches, grads)] + + c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)] + g_update = [ + (p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(params, new_caches, grads) + ] + updates = c_update + g_update + return updates + + class RecursiveNN: def __init__(self, V, D, K): self.V = V @@ -97,14 +116,16 @@ def recurrence(n, hiddens, words, parents, relations): # cost = -T.mean(target * premean) cost = -T.mean(T.log(py_x[-1, labels[-1]])) + rcost - grads = T.grad(cost, self.params) - dparams = [theano.shared(p.get_value()*0) for p in self.params] + # grads = T.grad(cost, self.params) + # dparams = [theano.shared(p.get_value()*0) for p in self.params] - updates = [ - (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) - ] + [ - (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) - ] + # updates = [ + # (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) + # ] + [ + # (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) + # ] + + updates = adagrad(cost, self.params, lr=8e-3) self.cost_predict_op = theano.function( inputs=[words, parents, relations, labels], @@ -124,7 +145,7 @@ def recurrence(n, hiddens, words, parents, relations): n_total = sum(len(words) for words, _, _, _ in trees) else: n_total = N - for i in xrange(epochs): + for i in range(epochs): t0 = datetime.now() sequence_indexes = shuffle(sequence_indexes) n_correct = 0 @@ -132,16 +153,11 @@ def recurrence(n, hiddens, words, parents, relations): it = 0 for j in sequence_indexes: words, par, rel, lab = trees[j] - # print "len(words):", len(words) _, c, p = self.train_op(words, par, rel, lab) - # if h.shape[0] < 10: - # print h - # print "py_x.shape:", y.shape - # print "pre-mean shape:", pm.shape - # print "target shape:", t.shape - # exit() + if np.isnan(c): - print "Cost is nan! Let's stop here. Why don't you try decreasing the learning rate?" + print("Cost is nan! Let's stop here. 
\ + Why don't you try decreasing the learning rate?") exit() cost += c if train_inner_nodes: @@ -152,11 +168,15 @@ def recurrence(n, hiddens, words, parents, relations): if it % 1 == 0: sys.stdout.write("j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % (it, N, float(n_correct)/n_total, cost)) sys.stdout.flush() - print "i:", i, "cost:", cost, "correct rate:", (float(n_correct)/n_total), "time for epoch:", (datetime.now() - t0) + print( + "i:", i, "cost:", cost, + "correct rate:", (float(n_correct)/n_total), + "time for epoch:", (datetime.now() - t0) + ) costs.append(cost) plt.plot(costs) - plt.show() + plt.draw() # don't block later code def score(self, trees, idx2word=None): n_total = len(trees) @@ -166,8 +186,8 @@ def score(self, trees, idx2word=None): n_correct += (p[-1] == lab[-1]) # if idx2word: # print_sentence(words, idx2word) - # print "label:", lab[-1], "pred:", p[-1] - print "n_correct:", n_correct, "n_total:", n_total, + # print("label:", lab[-1], "pred:", p[-1]) + print("n_correct:", n_correct, "n_total:", n_total, end=" ") return float(n_correct) / n_total @@ -222,10 +242,10 @@ def tree2list(tree, parent_idx, is_binary=False, is_left=False, is_right=False): def print_sentence(words, idx2word): # sentence = ' '.join(get_sentence(tree)) - # print sentence, "label:", tree.label + # print(sentence, "label:", tree.label) for w in words: if w >= 0: - print idx2word[w], + print(idx2word[w], end=" ") def main(is_binary=True): @@ -249,26 +269,29 @@ def main(is_binary=True): test = [t for t in test if t[3][-1] >= 0] # for filtering binary labels train = shuffle(train) - train = train[:2000] + # train = train[:2000] n_pos = sum(t[3][-1] for t in train) - # print "num pos train:", n_pos - # idx2word = {v:k for k, v in word2idx.iteritems()} - # for i in xrange(4): + # print("num pos train:", n_pos) + # idx2word = {v:k for k, v in word2idx.items()} + # for i in range(4): # words, _, _, labels = train[i] # print_sentence(words, idx2word) - # print "label:", labels[-1] + # print("label:", labels[-1]) test = shuffle(test) - test = test[:100] + test = test[:1000] V = len(word2idx) - print "vocab size:", V + print("vocab size:", V) D = 10 K = 2 if is_binary else 5 model = RecursiveNN(V, D, K) - model.fit(train, learning_rate=1e-2, reg=1e-2, mu=0, epochs=30, activation=T.tanh, train_inner_nodes=False) - print "train accuracy:", model.score(train) - print "test accuracy:", model.score(test) + model.fit(train, learning_rate=1e-2, reg=1e-2, mu=0, epochs=20, activation=T.tanh, train_inner_nodes=False) + print("train accuracy:", model.score(train)) + print("test accuracy:", model.score(test)) + + # make sure program doesn't end until we close the plot + plt.show() if __name__ == '__main__': diff --git a/nlp_class2/rntn_tensorflow.py b/nlp_class2/rntn_tensorflow.py index 7396a23c..77b563f2 100644 --- a/nlp_class2/rntn_tensorflow.py +++ b/nlp_class2/rntn_tensorflow.py @@ -3,6 +3,12 @@ # https://udemy.com/natural-language-processing-with-deep-learning-in-python # data is from: http://nlp.stanford.edu/sentiment/ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import sys import tensorflow as tf import numpy as np @@ -75,7 +81,7 @@ def fit(self, trees, lr=1e-2, mu=0.9, reg=1e-1, epochs=5): all_labels = [] i = 0 N = len(trees) - print "Compiling ops" + print("Compiling ops") for t in trees: i += 1 sys.stdout.write("%d/%d\r" % (i, N)) @@ -105,7 +111,7 @@ def fit(self, trees, lr=1e-2, 
mu=0.9, reg=1e-1, epochs=5): with tf.Session() as session: session.run(init) - for i in xrange(epochs): + for i in range(epochs): train_ops, costs, predictions, all_labels = shuffle(train_ops, costs, predictions, all_labels) epoch_cost = 0 n_correct = 0 @@ -158,9 +164,9 @@ def get_output_recursive(self, tree, list_of_logits, is_root=True): x = tf.nn.embedding_lookup(self.We, [tree.word]) else: # if tree.left is None or tree.right is None: - # print "This tree node has no word but also has no children:" + # print("This tree node has no word but also has no children:") # display_tree(tree) - # print "" + # print("") # this node has children x1 = self.get_output_recursive(tree.left, list_of_logits, is_root=False) x2 = self.get_output_recursive(tree.right, list_of_logits, is_root=False) @@ -198,7 +204,7 @@ def score(self, trees): i = 0 N = len(trees) - print "Compiling ops" + print("Compiling ops") for t in trees: i += 1 @@ -219,8 +225,8 @@ def score(self, trees): for prediction, y in zip(predictions, all_labels): p = session.run(prediction) - # print "pred:", p - # print "label:", y + # print("pred:", p) + # print("label:", y) # n_correct += np.sum(p == y) n_correct += (p[-1] == y[-1]) # we only care about the root n_total += len(y) @@ -240,8 +246,8 @@ def main(): model = RNTN(V, D, K, tf.nn.relu) model.fit(train) - print "train accuracy:", model.score(None) - print "test accuracy:", model.score(test) + print("train accuracy:", model.score(None)) + print("test accuracy:", model.score(test)) if __name__ == '__main__': diff --git a/nlp_class2/rntn_theano.py b/nlp_class2/rntn_theano.py index f8e3529d..d4fbb41d 100644 --- a/nlp_class2/rntn_theano.py +++ b/nlp_class2/rntn_theano.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import sys import numpy as np import matplotlib.pyplot as plt @@ -13,17 +19,79 @@ from sklearn.metrics import f1_score +# helper for adam optimizer +# use tensorflow defaults +# def adam(cost, params, lr0=1e-4, beta1=0.9, beta2=0.999, eps=1e-8): +# grads = T.grad(cost, params) +# updates = [] +# time = theano.shared(0) +# new_time = time + 1 +# updates.append((time, new_time)) +# lr = lr0*T.sqrt(1 - beta2**new_time) / (1 - beta1**new_time) +# for p, g in zip(params, grads): +# m = theano.shared(p.get_value() * 0.) +# v = theano.shared(p.get_value() * 0.) +# new_m = beta1*m + (1 - beta1)*g +# new_v = beta2*v + (1 - beta2)*g*g +# new_p = p - lr*new_m / (T.sqrt(new_v) + eps) +# updates.append((m, new_m)) +# updates.append((v, new_v)) +# updates.append((p, new_p)) +# return updates + + +# def momentum_updates(cost, params, learning_rate=1e-3, mu=0.99): +# # momentum changes +# dparams = [theano.shared(p.get_value() * 0.) for p in params] + +# updates = [] +# grads = T.grad(cost, params) +# for p, dp, g in zip(params, dparams, grads): +# dp_update = mu*dp - learning_rate*g +# p_update = p + dp_update + +# updates.append((dp, dp_update)) +# updates.append((p, p_update)) +# return updates + + +# def rmsprop(cost, params, lr=1e-3, decay=0.999, eps=1e-10): +# grads = T.grad(cost, params) +# caches = [theano.shared(np.ones_like(p.get_value())) for p in params] +# new_caches = [decay*c + (1. 
- decay)*g*g for c, g in zip(caches, grads)] + +# c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)] +# g_update = [ +# (p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(params, new_caches, grads) +# ] +# updates = c_update + g_update +# return updates + + +def adagrad(cost, params, lr, eps=1e-10): + grads = T.grad(cost, params) + caches = [theano.shared(np.ones_like(p.get_value())) for p in params] + new_caches = [c + g*g for c, g in zip(caches, grads)] + + c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)] + g_update = [ + (p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(params, new_caches, grads) + ] + updates = c_update + g_update + return updates + + class RecursiveNN: - def __init__(self, V, D, K): + def __init__(self, V, D, K, activation=T.tanh): self.V = V self.D = D self.K = K + self.f = activation - def fit(self, trees, learning_rate=1e-3, mu=0.5, reg=1e-2, eps=1e-2, epochs=20, activation=T.tanh, train_inner_nodes=False): + def fit(self, trees, reg=1e-3, epochs=8, train_inner_nodes=False): D = self.D V = self.V K = self.K - self.f = activation N = len(trees) We = init_weight(V, D) @@ -47,6 +115,7 @@ def fit(self, trees, learning_rate=1e-3, mu=0.5, reg=1e-2, eps=1e-2, epochs=20, self.bo = theano.shared(bo) self.params = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.bh, self.Wo, self.bo] + lr = T.scalar('learning_rate') words = T.ivector('words') left_children = T.ivector('left_children') right_children = T.ivector('right_children') @@ -85,26 +154,14 @@ def recurrence(n, hiddens, words, left, right): prediction = T.argmax(py_x, axis=1) - rcost = reg*T.mean([(p*p).sum() for p in self.params]) + rcost = reg*T.sum([(p*p).sum() for p in self.params]) if train_inner_nodes: cost = -T.mean(T.log(py_x[T.arange(labels.shape[0]), labels])) + rcost else: cost = -T.mean(T.log(py_x[-1, labels[-1]])) + rcost - grads = T.grad(cost, self.params) - # dparams = [theano.shared(p.get_value()*0) for p in self.params] - cache = [theano.shared(p.get_value()*0) for p in self.params] - - # momentum - # updates = [ - # (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) - # ] + [ - # (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) - # ] - updates = [ - (c, c + g*g) for c, g in zip(cache, grads) - ] + [ - (p, p - learning_rate*g / T.sqrt(c + eps)) for p, c, g in zip(self.params, cache, grads) - ] + + + updates = adagrad(cost, self.params, lr) self.cost_predict_op = theano.function( inputs=[words, left_children, right_children, labels], @@ -113,18 +170,19 @@ def recurrence(n, hiddens, words, left, right): ) self.train_op = theano.function( - inputs=[words, left_children, right_children, labels], + inputs=[words, left_children, right_children, labels, lr], outputs=[cost, prediction], updates=updates ) + lr_ = 8e-3 # initial learning rate costs = [] sequence_indexes = range(N) if train_inner_nodes: n_total = sum(len(words) for words, _, _, _ in trees) else: n_total = N - for i in xrange(epochs): + for i in range(epochs): t0 = datetime.now() sequence_indexes = shuffle(sequence_indexes) n_correct = 0 @@ -132,9 +190,12 @@ def recurrence(n, hiddens, words, left, right): it = 0 for j in sequence_indexes: words, left, right, lab = trees[j] - c, p = self.train_op(words, left, right, lab) + c, p = self.train_op(words, left, right, lab, lr_) if np.isnan(c): - print "Cost is nan! Let's stop here. Why don't you try decreasing the learning rate?" + print("Cost is nan! Let's stop here. 
\ + Why don't you try decreasing the learning rate?") + for p in self.params: + print(p.get_value().sum()) exit() cost += c if train_inner_nodes: @@ -143,9 +204,16 @@ def recurrence(n, hiddens, words, left, right): n_correct += (p[-1] == lab[-1]) it += 1 if it % 1 == 0: - sys.stdout.write("j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % (it, N, float(n_correct)/n_total, cost)) + sys.stdout.write( + "j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % + (it, N, float(n_correct)/n_total, cost) + ) sys.stdout.flush() - print "i:", i, "cost:", cost, "correct rate:", (float(n_correct)/n_total), "time for epoch:", (datetime.now() - t0) + print( + "i:", i, "cost:", cost, + "correct rate:", (float(n_correct)/n_total), + "time for epoch:", (datetime.now() - t0) + ) costs.append(cost) plt.plot(costs) @@ -230,25 +298,25 @@ def main(is_binary=True): test = [t for t in test if t[3][-1] >= 0] # for filtering binary labels train = shuffle(train) - train = train[:5000] + # train = train[:5000] # n_pos = sum(t[3][-1] for t in train) - # print "n_pos train:", n_pos + # print("n_pos train:", n_pos) test = shuffle(test) test = test[:1000] # n_pos = sum(t[3][-1] for t in test) - # print "n_pos test:", n_pos + # print("n_pos test:", n_pos) V = len(word2idx) - print "vocab size:", V + print("vocab size:", V) D = 20 K = 2 if is_binary else 5 model = RecursiveNN(V, D, K) model.fit(train) - print "train accuracy:", model.score(train) - print "test accuracy:", model.score(test) - print "train f1:", model.f1_score(train) - print "test f1:", model.f1_score(test) + print("train accuracy:", model.score(train)) + print("test accuracy:", model.score(test)) + print("train f1:", model.f1_score(train)) + print("test f1:", model.f1_score(test)) if __name__ == '__main__': diff --git a/nlp_class2/tfidf_tsne.py b/nlp_class2/tfidf_tsne.py index cd59a84f..6c33ff42 100644 --- a/nlp_class2/tfidf_tsne.py +++ b/nlp_class2/tfidf_tsne.py @@ -1,6 +1,13 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import json import numpy as np import matplotlib.pyplot as plt @@ -35,25 +42,25 @@ def main(): for i in sentence: A[i,j] += 1 j += 1 - print "finished getting raw counts" + print("finished getting raw counts") transformer = TfidfTransformer() A = transformer.fit_transform(A) - # print "type(A):", type(A) + # print("type(A):", type(A)) # exit() A = A.toarray() - idx2word = {v:k for k, v in word2idx.iteritems()} + idx2word = {v:k for k, v in iteritems(word2idx)} # plot the data in 2-D tsne = TSNE() Z = tsne.fit_transform(A) plt.scatter(Z[:,0], Z[:,1]) - for i in xrange(V): + for i in range(V): try: - plt.annotate(s=idx2word[i].encode("utf8"), xy=(Z[i,0], Z[i,1])) + plt.annotate(s=idx2word[i].encode("utf8").decode("utf8"), xy=(Z[i,0], Z[i,1])) except: - print "bad string:", idx2word[i] + print("bad string:", idx2word[i]) plt.show() # create a higher-D word embedding, try word analogies diff --git a/nlp_class2/util.py b/nlp_class2/util.py index b6d94586..45df0d42 100644 --- a/nlp_class2/util.py +++ b/nlp_class2/util.py @@ -8,6 +8,7 @@ # sudo pip install -U future +import os import numpy as np def init_weight(Mi, Mo): @@ -132,6 +133,20 @@ def get_ptb_data(): # word2idx mapping, sentences # here the 
sentences should be Tree objects + if not os.path.exists('../large_files/trees'): + print("Please create ../large_files/trees relative to this file.") + print("train.txt and test.txt should be stored in there.") + print("Please download the data from http://nlp.stanford.edu/sentiment/") + exit() + elif not os.path.exists('../large_files/trees/train.txt'): + print("train.txt is not in ../large_files/trees/train.txt") + print("Please download the data from http://nlp.stanford.edu/sentiment/") + exit() + elif not os.path.exists('../large_files/trees/test.txt'): + print("test.txt is not in ../large_files/trees/test.txt") + print("Please download the data from http://nlp.stanford.edu/sentiment/") + exit() + word2idx = {} train = [] test = [] @@ -149,7 +164,7 @@ def get_ptb_data(): # break # test set - for line in open('../large_files/trees/train.txt'): + for line in open('../large_files/trees/test.txt'): line = line.rstrip() if line: t = str2tree(line, word2idx) diff --git a/nlp_class2/visualize_countries.py b/nlp_class2/visualize_countries.py index 283e6e52..9d0a44e8 100644 --- a/nlp_class2/visualize_countries.py +++ b/nlp_class2/visualize_countries.py @@ -1,6 +1,12 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import json import numpy as np import matplotlib.pyplot as plt @@ -24,7 +30,7 @@ def main(we_file='glove_model_50.npz', w2i_file='glove_word2idx_50.json'): Z = tsne.fit_transform(We) Z = Z[idx] plt.scatter(Z[:,0], Z[:,1]) - for i in xrange(len(words)): + for i in range(len(words)): plt.annotate(s=words[i], xy=(Z[i,0], Z[i,1])) plt.show() diff --git a/nlp_class2/word2vec.py b/nlp_class2/word2vec.py index 6d79551f..ea1c7182 100644 --- a/nlp_class2/word2vec.py +++ b/nlp_class2/word2vec.py @@ -1,6 +1,13 @@ # Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + import json import numpy as np import theano @@ -17,6 +24,20 @@ from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx +def get_text8(): + words = open('../large_files/text8').read() + word2idx = {} + sents = [[]] + count = 0 + for word in words.split(): + if word not in word2idx: + word2idx[word] = count + count += 1 + sents[0].append(word2idx[word]) + print("count:", count) + return sents, word2idx + + def sigmoid(x): return 1 / (1 + np.exp(-x)) @@ -44,9 +65,9 @@ def _get_pnw(self, X): word_freq[xj] = 0 word_freq[xj] += 1 self.Pnw = np.zeros(self.V) - for j in xrange(2, self.V): # 0 and 1 are the start and end tokens, we won't use those here + for j in range(2, self.V): # 0 and 1 are the start and end tokens, we won't use those here self.Pnw[j] = (word_freq[j] / float(word_count))**0.75 - # print "self.Pnw[2000]:", self.Pnw[2000] + assert(np.all(self.Pnw[2:] > 0)) return self.Pnw @@ -55,16 +76,14 @@ def _get_negative_samples(self, context, num_neg_samples): saved = {} for context_idx in context: saved[context_idx] = self.Pnw[context_idx] - # print "saving -- context 
id:", context_idx, "value:", self.Pnw[context_idx] self.Pnw[context_idx] = 0 neg_samples = np.random.choice( - xrange(self.V), + range(self.V), size=num_neg_samples, # this is arbitrary - number of negative samples to take replace=False, p=self.Pnw / np.sum(self.Pnw), ) - # print "saved:", saved - for j, pnwj in saved.iteritems(): + for j, pnwj in iteritems(saved): self.Pnw[j] = pnwj assert(np.all(self.Pnw[2:] > 0)) return neg_samples @@ -84,11 +103,11 @@ def fit(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoch costs = [] cost_per_epoch = [] sample_indices = range(N) - for i in xrange(epochs): + for i in range(epochs): t0 = datetime.now() sample_indices = shuffle(sample_indices) cost_per_epoch_i = [] - for it in xrange(N): + for it in range(N): j = sample_indices[it] x = X[j] # one sentence @@ -98,7 +117,7 @@ def fit(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoch cj = [] n = len(x) - # for jj in xrange(n): + # for jj in range(n): ########## try one random window per sentence ########### jj = np.random.choice(n) @@ -111,7 +130,6 @@ def fit(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoch # NOTE: context can contain DUPLICATES! # e.g. " cats and dogs" context = np.array(list(set(context)), dtype=np.int32) - # print "context:", context posA = Z.dot(self.W2[:,context]) pos_pY = sigmoid(posA) @@ -120,7 +138,6 @@ def fit(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoch # technically can remove this line now but leave for sanity checking # neg_samples = np.setdiff1d(neg_samples, Y[j]) - # print "number of negative samples:", len(neg_samples) negA = Z.dot(self.W2[:,neg_samples]) neg_pY = sigmoid(-negA) c = -np.log(pos_pY).sum() - np.log(neg_pY).sum() @@ -155,7 +172,10 @@ def fit(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoch epoch_cost = np.mean(cost_per_epoch_i) cost_per_epoch.append(epoch_cost) - print "time to complete epoch %d:" % i, (datetime.now() - t0), "cost:", epoch_cost + print( + "time to complete epoch %d:" % i, (datetime.now() - t0), + "cost:", epoch_cost + ) plt.plot(costs) plt.title("Numpy costs") plt.show() @@ -213,11 +233,11 @@ def fitt(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoc costs = [] cost_per_epoch = [] sample_indices = range(N) - for i in xrange(epochs): + for i in range(epochs): t0 = datetime.now() sample_indices = shuffle(sample_indices) cost_per_epoch_i = [] - for it in xrange(N): + for it in range(N): j = sample_indices[it] x = X[j] # one sentence @@ -227,7 +247,7 @@ def fitt(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoc cj = [] n = len(x) - # for jj in xrange(n): + # for jj in range(n): # start = max(0, jj - self.context_sz) # end = min(n, jj + 1 + self.context_sz) @@ -264,7 +284,10 @@ def fitt(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epoc epoch_cost = np.mean(cost_per_epoch_i) cost_per_epoch.append(epoch_cost) - print "time to complete epoch %d:" % i, (datetime.now() - t0), "cost:", epoch_cost + print( + "time to complete epoch %d:" % i, (datetime.now() - t0), + "cost:", epoch_cost + ) self.W1 = W1.get_value() self.W2 = W2.get_value() @@ -284,8 +307,9 @@ def save(self, fn): def main(use_brown=True): if use_brown: - # sentences, word2idx = get_sentences_with_word2idx_limit_vocab() - sentences, word2idx = get_sentences_with_word2idx() + sentences, word2idx = get_sentences_with_word2idx_limit_vocab() + # sentences, word2idx = get_sentences_with_word2idx() + # sentences, 
word2idx = get_text8() else: sentences, word2idx = get_wikipedia_data(n_files=1, n_vocab=2000) with open('w2v_word2idx.json', 'w') as f: @@ -293,7 +317,13 @@ def main(use_brown=True): V = len(word2idx) model = Model(50, V, 5) - model.fit(sentences, learning_rate=1e-3, mu=0, epochs=3, num_neg_samples=5) + + # use numpy + # model.fit(sentences, learning_rate=1e-3, mu=0, epochs=5, num_neg_samples=5) + + # use theano + model.fitt(sentences, learning_rate=1e-3, mu=0, epochs=5, num_neg_samples=5) + model.save('w2v_model.npz') @@ -309,7 +339,7 @@ def find_analogies(w1, w2, w3, concat=True, we_file='w2v_model.npz', w2i_file='w if concat: We = np.hstack([W1, W2.T]) - print "We.shape:", We.shape + print("We.shape:", We.shape) assert(V == We.shape[0]) else: We = (W1 + W2.T) / 2 @@ -317,9 +347,9 @@ def find_analogies(w1, w2, w3, concat=True, we_file='w2v_model.npz', w2i_file='w _find_analogies(w1, w2, w3, We, word2idx) if __name__ == '__main__': - main(use_brown=True) + main(use_brown=False) for concat in (True, False): - print "** concat:", concat + print("** concat:", concat) find_analogies('king', 'man', 'woman', concat=concat) find_analogies('france', 'paris', 'london', concat=concat) find_analogies('france', 'paris', 'rome', concat=concat) From f2782513f23f6545c69b0b1ba4e1bcc96e952819 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Fri, 29 Dec 2017 22:11:41 -0500 Subject: [PATCH 005/329] minor update --- nlp_class2/tfidf_tsne.py | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/nlp_class2/tfidf_tsne.py b/nlp_class2/tfidf_tsne.py index 6c33ff42..7861aba5 100644 --- a/nlp_class2/tfidf_tsne.py +++ b/nlp_class2/tfidf_tsne.py @@ -26,10 +26,28 @@ def main(): - sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=1500) - # sentences, word2idx = get_wikipedia_data(n_files=10, n_vocab=1500, by_paragraph=True) - with open('w2v_word2idx.json', 'w') as f: - json.dump(word2idx, f) + analogies_to_try = ( + ('king', 'man', 'woman'), + ('france', 'paris', 'london'), + ('france', 'paris', 'rome'), + ('paris', 'france', 'italy'), + ) + + # sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=1500) + sentences, word2idx = get_wikipedia_data(n_files=20, n_vocab=2000, by_paragraph=True) + # with open('tfidf_word2idx.json', 'w') as f: + # json.dump(word2idx, f) + + notfound = False + for word_list in analogies_to_try: + for w in word_list: + if w not in word2idx: + print("%s not found in vocab, remove it from \ + analogies to try or increase vocab size") + notfound = True + if notfound: + exit() + # build term document matrix V = len(word2idx) @@ -61,16 +79,18 @@ def main(): plt.annotate(s=idx2word[i].encode("utf8").decode("utf8"), xy=(Z[i,0], Z[i,1])) except: print("bad string:", idx2word[i]) - plt.show() + plt.draw() # create a higher-D word embedding, try word analogies # tsne = TSNE(n_components=3) # We = tsne.fit_transform(A) We = Z - find_analogies('king', 'man', 'woman', We, word2idx) - find_analogies('france', 'paris', 'london', We, word2idx) - find_analogies('france', 'paris', 'rome', We, word2idx) - find_analogies('paris', 'france', 'italy', We, word2idx) + + for word_list in analogies_to_try: + w1, w2, w3 = word_list + find_analogies(w1, w2, w3, We, word2idx) + + plt.show() # pause script until plot is closed if __name__ == '__main__': From a45a3b25140cf93fda93952540ab0857ff1478b8 Mon Sep 17 00:00:00 2001 From: Mac User Date: Tue, 2 Jan 2018 22:41:36 -0500 Subject: [PATCH 006/329] small improvement --- 
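The word2vec hunks above revolve around the negative-sampling draw: sample word indices from a smoothed unigram distribution (counts raised to the 0.75 power, as in _get_pnw) with the context words temporarily zeroed out. A small self-contained sketch of that idea; the counts and context values below are illustrative, not taken from the repo:

import numpy as np

counts = np.array([0, 0, 10, 5, 2, 1], dtype=np.float64)  # indices 0 and 1 reserved for start/end tokens
p = counts ** 0.75                  # smoothed unigram distribution
context = [2]                       # words that must not be drawn as negatives
saved = {j: p[j] for j in context}  # remember their probabilities
p[context] = 0                      # zero them out for the draw
neg_samples = np.random.choice(len(p), size=3, replace=False, p=p / p.sum())
for j, pj in saved.items():         # restore the saved probabilities
    p[j] = pj
print(neg_samples)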
ann_logistic_extra/ann_predict.py | 2 +- ann_logistic_extra/ann_train.py | 18 ++--- ann_logistic_extra/logistic_predict.py | 2 +- ann_logistic_extra/logistic_softmax_train.py | 16 ++-- ann_logistic_extra/logistic_train.py | 12 +-- ann_logistic_extra/process.py | 77 ++++++++++++-------- 6 files changed, 64 insertions(+), 63 deletions(-) diff --git a/ann_logistic_extra/ann_predict.py b/ann_logistic_extra/ann_predict.py index a14ae2e6..23b9deab 100644 --- a/ann_logistic_extra/ann_predict.py +++ b/ann_logistic_extra/ann_predict.py @@ -7,7 +7,7 @@ import numpy as np from process import get_data -X, Y = get_data() +X, Y, _, _ = get_data() # randomly initialize weights M = 5 diff --git a/ann_logistic_extra/ann_train.py b/ann_logistic_extra/ann_train.py index 0527e1f9..5c84a7e4 100644 --- a/ann_logistic_extra/ann_train.py +++ b/ann_logistic_extra/ann_train.py @@ -17,19 +17,13 @@ def y2indicator(y, K): ind[i, y[i]] = 1 return ind -X, Y = get_data() -X, Y = shuffle(X, Y) -Y = Y.astype(np.int32) -M = 5 -D = X.shape[1] -K = len(set(Y)) - -# create train and test sets -Xtrain = X[:-100] -Ytrain = Y[:-100] +Xtrain, Ytrain, Xtest, Ytest = get_data() +D = Xtrain.shape[1] +K = len(set(Ytrain) | set(Ytest)) +M = 5 # num hidden units + +# convert to indicator Ytrain_ind = y2indicator(Ytrain, K) -Xtest = X[-100:] -Ytest = Y[-100:] Ytest_ind = y2indicator(Ytest, K) # randomly initialize weights diff --git a/ann_logistic_extra/logistic_predict.py b/ann_logistic_extra/logistic_predict.py index 9cb2409a..576cc81b 100644 --- a/ann_logistic_extra/logistic_predict.py +++ b/ann_logistic_extra/logistic_predict.py @@ -7,7 +7,7 @@ import numpy as np from process import get_binary_data -X, Y = get_binary_data() +X, Y, _, _ = get_binary_data() # randomly initialize weights D = X.shape[1] diff --git a/ann_logistic_extra/logistic_softmax_train.py b/ann_logistic_extra/logistic_softmax_train.py index 1647f89a..2bdc114f 100644 --- a/ann_logistic_extra/logistic_softmax_train.py +++ b/ann_logistic_extra/logistic_softmax_train.py @@ -17,18 +17,12 @@ def y2indicator(y, K): ind[i, y[i]] = 1 return ind -X, Y = get_data() -X, Y = shuffle(X, Y) -Y = Y.astype(np.int32) -D = X.shape[1] -K = len(set(Y)) - -# create train and test sets -Xtrain = X[:-100] -Ytrain = Y[:-100] +Xtrain, Ytrain, Xtest, Ytest = get_data() +D = Xtrain.shape[1] +K = len(set(Ytrain) | set(Ytest)) + +# convert to indicator Ytrain_ind = y2indicator(Ytrain, K) -Xtest = X[-100:] -Ytest = Y[-100:] Ytest_ind = y2indicator(Ytest, K) # randomly initialize weights diff --git a/ann_logistic_extra/logistic_train.py b/ann_logistic_extra/logistic_train.py index 25e11b95..c9a22815 100644 --- a/ann_logistic_extra/logistic_train.py +++ b/ann_logistic_extra/logistic_train.py @@ -10,17 +10,11 @@ from sklearn.utils import shuffle from process import get_binary_data -X, Y = get_binary_data() -X, Y = shuffle(X, Y) - -# create train and test sets -Xtrain = X[:-100] -Ytrain = Y[:-100] -Xtest = X[-100:] -Ytest = Y[-100:] +# get the data +Xtrain, Ytrain, Xtest, Ytest = get_binary_data() # randomly initialize weights -D = X.shape[1] +D = Xtrain.shape[1] W = np.random.randn(D) b = 0 # bias term diff --git a/ann_logistic_extra/process.py b/ann_logistic_extra/process.py index e43c4651..568ba107 100644 --- a/ann_logistic_extra/process.py +++ b/ann_logistic_extra/process.py @@ -15,43 +15,62 @@ # one-hot categorical columns def get_data(): - df = pd.read_csv(dir_path + '/ecommerce_data.csv') + df = pd.read_csv(dir_path + '/ecommerce_data.csv') - # just in case you're curious what's in it - # 
df.head() + # just in case you're curious what's in it + # df.head() - # easier to work with numpy array - data = df.as_matrix() + # easier to work with numpy array + data = df.as_matrix() - X = data[:,:-1] - Y = data[:,-1] + # shuffle it + np.random.shuffle(data) - # normalize columns 1 and 2 - X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() - X[:,2] = (X[:,2] - X[:,2].mean()) / X[:,2].std() + # split features and labels + X = data[:,:-1] + Y = data[:,-1].astype(np.int32) - # create a new matrix X2 with the correct number of columns - N, D = X.shape - X2 = np.zeros((N, D+3)) - X2[:,0:(D-1)] = X[:,0:(D-1)] # non-categorical + # one-hot encode the categorical data + # create a new matrix X2 with the correct number of columns + N, D = X.shape + X2 = np.zeros((N, D+3)) + X2[:,0:(D-1)] = X[:,0:(D-1)] # non-categorical - # one-hot - for n in range(N): - t = int(X[n,D-1]) - X2[n,t+D-1] = 1 + # one-hot + for n in range(N): + t = int(X[n,D-1]) + X2[n,t+D-1] = 1 - # method 2 - # Z = np.zeros((N, 4)) - # Z[np.arange(N), X[:,D-1].astype(np.int32)] = 1 - # # assign: X2[:,-4:] = Z - # assert(np.abs(X2[:,-4:] - Z).sum() < 1e-10) + # method 2 + # Z = np.zeros((N, 4)) + # Z[np.arange(N), X[:,D-1].astype(np.int32)] = 1 + # # assign: X2[:,-4:] = Z + # assert(np.abs(X2[:,-4:] - Z).sum() < 1e-10) - return X2, Y + # assign X2 back to X, since we don't need original anymore + X = X2 + + # split train and test + Xtrain = X[:-100] + Ytrain = Y[:-100] + Xtest = X[-100:] + Ytest = Y[-100:] + + # normalize columns 1 and 2 + for i in (1, 2): + m = Xtrain[:,i].mean() + s = Xtrain[:,i].std() + Xtrain[:,i] = (Xtrain[:,i] - m) / s + Xtest[:,i] = (Xtest[:,i] - m) / s + + return Xtrain, Ytrain, Xtest, Ytest def get_binary_data(): - # return only the data from the first 2 classes - X, Y = get_data() - X2 = X[Y <= 1] - Y2 = Y[Y <= 1] - return X2, Y2 + # return only the data from the first 2 classes + Xtrain, Ytrain, Xtest, Ytest = get_data() + X2train = Xtrain[Ytrain <= 1] + Y2train = Ytrain[Ytrain <= 1] + X2test = Xtest[Ytest <= 1] + Y2test = Ytest[Ytest <= 1] + return X2train, Y2train, X2test, Y2test From 0795835195cd78495e715e3df248d27e2038037b Mon Sep 17 00:00:00 2001 From: Mac User Date: Sun, 7 Jan 2018 00:29:32 -0500 Subject: [PATCH 007/329] fix --- rl2/cartpole/save_a_video.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rl2/cartpole/save_a_video.py b/rl2/cartpole/save_a_video.py index 57e7ed19..31690c29 100644 --- a/rl2/cartpole/save_a_video.py +++ b/rl2/cartpole/save_a_video.py @@ -45,7 +45,7 @@ def random_search(env): episode_lengths = [] best = 0 params = None - for t in xrange(100): + for t in range(100): new_params = np.random.random(4)*2 - 1 avg_length = play_multiple_episodes(env, 100, new_params) episode_lengths.append(avg_length) From 50fcb15a84a562c241e660457b0de7e7324d857b Mon Sep 17 00:00:00 2001 From: Mac User Date: Mon, 8 Jan 2018 13:49:43 -0500 Subject: [PATCH 008/329] remove useless comment --- nlp_class2/glove.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nlp_class2/glove.py b/nlp_class2/glove.py index 0fb996bc..cd035dc4 100644 --- a/nlp_class2/glove.py +++ b/nlp_class2/glove.py @@ -19,10 +19,6 @@ from sklearn.utils import shuffle from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab -# Experiments -# previous results did not make sense b/c X was built incorrectly -# redo b/c b and c were not being added correctly as 2-D objects - # using ALS, what's the least # files to get correct analogies? 
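One detail of the process.py refactor a few hunks above deserves a callout: the mean and standard deviation for columns 1 and 2 are computed on the training split only and then reused to normalize the test split. A tiny hedged sketch of that convention with random stand-in data (the column indices mirror get_data, everything else is illustrative):

import numpy as np

Xtrain = np.random.randn(200, 3) * 5 + 2   # stand-in for the training features
Xtest = np.random.randn(100, 3) * 5 + 2    # stand-in for the test features

for i in (1, 2):                           # same columns normalized in get_data
    m = Xtrain[:, i].mean()
    s = Xtrain[:, i].std()
    Xtrain[:, i] = (Xtrain[:, i] - m) / s
    Xtest[:, i] = (Xtest[:, i] - m) / s    # reuse training statistics; never fit on the test set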
# use this for word2vec training to make it faster # first tried 20 files --> not enough From 9bd1396504c5b6111c6a87ce82dd3923499ebd33 Mon Sep 17 00:00:00 2001 From: Mac User Date: Mon, 8 Jan 2018 15:27:46 -0500 Subject: [PATCH 009/329] remove load arg --- supervised_class/knn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervised_class/knn.py b/supervised_class/knn.py index 37b97067..015bffef 100644 --- a/supervised_class/knn.py +++ b/supervised_class/knn.py @@ -30,7 +30,7 @@ def fit(self, X, y): def predict(self, X): y = np.zeros(len(X)) for i,x in enumerate(X): # test points - sl = SortedList(load=self.k) # stores (distance, class) tuples + sl = SortedList() # stores (distance, class) tuples for j,xt in enumerate(self.X): # training points diff = x - xt d = diff.dot(diff) From 5d40777ff56cfdb7b378e1ea995c377ccbd7bc22 Mon Sep 17 00:00:00 2001 From: Mac User Date: Fri, 19 Jan 2018 13:25:12 -0500 Subject: [PATCH 010/329] update --- ann_class2/dropout_tensorflow.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ann_class2/dropout_tensorflow.py b/ann_class2/dropout_tensorflow.py index 09535f85..1247a912 100644 --- a/ann_class2/dropout_tensorflow.py +++ b/ann_class2/dropout_tensorflow.py @@ -124,8 +124,7 @@ def forward(self, X): def forward_test(self, X): Z = X - Z = tf.nn.dropout(Z, self.dropout_rates[0]) - for h, p in zip(self.hidden_layers, self.dropout_rates[1:]): + for h in zip(self.hidden_layers): Z = h.forward(Z) return tf.matmul(Z, self.W) + self.b From 53c89dd7f1efa6f0d6fbba8eee3fc6808511bd42 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Fri, 19 Jan 2018 14:51:06 -0500 Subject: [PATCH 011/329] update --- ann_class2/dropout_tensorflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ann_class2/dropout_tensorflow.py b/ann_class2/dropout_tensorflow.py index 1247a912..76b8bac8 100644 --- a/ann_class2/dropout_tensorflow.py +++ b/ann_class2/dropout_tensorflow.py @@ -124,7 +124,7 @@ def forward(self, X): def forward_test(self, X): Z = X - for h in zip(self.hidden_layers): + for h in self.hidden_layers: Z = h.forward(Z) return tf.matmul(Z, self.W) + self.b From dd15c9ac4da2698b2e209ad02827f382d19c615c Mon Sep 17 00:00:00 2001 From: Mac User Date: Sun, 28 Jan 2018 20:20:18 -0500 Subject: [PATCH 012/329] fix comment --- ann_class/forwardprop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ann_class/forwardprop.py b/ann_class/forwardprop.py index b67e2fac..2d5c7a8c 100644 --- a/ann_class/forwardprop.py +++ b/ann_class/forwardprop.py @@ -42,7 +42,7 @@ def sigmoid(a): def forward(X, W1, b1, W2, b2): Z = sigmoid(X.dot(W1) + b1) # sigmoid # Z = np.tanh(X.dot(W1) + b1) # tanh - # Z = np.maximum(X.dot(W1) + b1) # relu + # Z = np.maximum(X.dot(W1) + b1, 0) # relu A = Z.dot(W2) + b2 expA = np.exp(A) Y = expA / expA.sum(axis=1, keepdims=True) From 5f98a8e61849610a51720889d9be510c2441a607 Mon Sep 17 00:00:00 2001 From: Mac User Date: Sat, 3 Feb 2018 18:51:40 -0500 Subject: [PATCH 013/329] forgot to update max --- rl2/mountaincar/pg_tf_random.py | 1 + rl2/mountaincar/pg_theano_random.py | 1 + 2 files changed, 2 insertions(+) diff --git a/rl2/mountaincar/pg_tf_random.py b/rl2/mountaincar/pg_tf_random.py index 3feea837..430281a0 100644 --- a/rl2/mountaincar/pg_tf_random.py +++ b/rl2/mountaincar/pg_tf_random.py @@ -220,6 +220,7 @@ def random_search(env, pmodel, gamma): if avg_totalrewards > best_avg_totalreward: best_pmodel = tmp_pmodel + best_avg_totalreward = avg_totalrewards return totalrewards, best_pmodel diff 
--git a/rl2/mountaincar/pg_theano_random.py b/rl2/mountaincar/pg_theano_random.py index 1f6a45b8..9ac07b16 100644 --- a/rl2/mountaincar/pg_theano_random.py +++ b/rl2/mountaincar/pg_theano_random.py @@ -191,6 +191,7 @@ def random_search(env, pmodel, gamma): if avg_totalrewards > best_avg_totalreward: best_pmodel = tmp_pmodel + best_avg_totalreward = avg_totalrewards return totalrewards, best_pmodel From a08f2735cdfe02b970ee8185dfa085e798275cf8 Mon Sep 17 00:00:00 2001 From: Mac User Date: Thu, 15 Feb 2018 04:46:55 -0500 Subject: [PATCH 014/329] small update rnn --- rnn_class/srn_language.py | 4 ++-- rnn_class/srn_language_tf.py | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/rnn_class/srn_language.py b/rnn_class/srn_language.py index d86d3a22..b02a7f0c 100644 --- a/rnn_class/srn_language.py +++ b/rnn_class/srn_language.py @@ -79,7 +79,7 @@ def recurrence(x_t, h_t1): updates = [] for p, dp, g in zip(self.params, dparams, grads): - new_dp = mu*dp - lr*g + new_dp = mu*dp - learning_rate*g updates.append((dp, new_dp)) new_p = p + new_dp @@ -235,7 +235,7 @@ def wikipedia(): rnn.fit(sentences, learning_rate=1e-4, show_fig=True, activation=T.nnet.relu) if __name__ == '__main__': - # train_poetry() + train_poetry() generate_poetry() # wikipedia() diff --git a/rnn_class/srn_language_tf.py b/rnn_class/srn_language_tf.py index b57dd69c..ff619a83 100644 --- a/rnn_class/srn_language_tf.py +++ b/rnn_class/srn_language_tf.py @@ -54,17 +54,21 @@ def build(self, We, Wx, Wh, bh, h0, Wo, bo): # X_one_hot.dot(We) XW = tf.nn.embedding_lookup(We, self.tfX) + # multiply it by input->hidden so we don't have to do + # it inside recurrence + XW_Wx = tf.matmul(XW, self.Wx) - def recurrence(h_t1, xWe_t): + + def recurrence(h_t1, XW_Wx_t): # returns h(t), y(t) h_t1 = tf.reshape(h_t1, (1, M)) - h_t = self.f(xWe_t + tf.matmul(h_t1, self.Wh) + self.bh) + h_t = self.f(XW_Wx_t + tf.matmul(h_t1, self.Wh) + self.bh) h_t = tf.reshape(h_t, (M,)) return h_t h = tf.scan( fn=recurrence, - elems=XW, + elems=XW_Wx, initializer=self.h0, ) From f3b0eac44560d214897311c1acd89d098ff10b49 Mon Sep 17 00:00:00 2001 From: Mac User Date: Fri, 16 Feb 2018 14:43:32 -0500 Subject: [PATCH 015/329] add get_state_sequence for continuous --- hmm_class/hmmc.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/hmm_class/hmmc.py b/hmm_class/hmmc.py index d3a38cab..c66f4476 100644 --- a/hmm_class/hmmc.py +++ b/hmm_class/hmmc.py @@ -191,6 +191,35 @@ def likelihood(self, x): alpha[t] = alpha[t-1].dot(self.A) * B[:,t] return alpha[-1].sum() + def get_state_sequence(self, x): + # returns the most likely state sequence given observed sequence x + # using the Viterbi algorithm + T = len(x) + + # make the emission matrix B + B = np.zeros((self.M, T)) + for j in range(self.M): + for t in range(T): + for k in range(self.K): + p = self.R[j,k] * mvn.pdf(x[t], self.mu[j,k], self.sigma[j,k]) + B[j,t] += p + + # perform Viterbi as usual + delta = np.zeros((T, self.M)) + psi = np.zeros((T, self.M)) + delta[0] = self.pi*B[:,0] + for t in range(1, T): + for j in range(self.M): + delta[t,j] = np.max(delta[t-1]*self.A[:,j]) * B[j,t] + psi[t,j] = np.argmax(delta[t-1]*self.A[:,j]) + + # backtrack + states = np.zeros(T, dtype=np.int32) + states[T-1] = np.argmax(delta[T-1]) + for t in range(T-2, -1, -1): + states[t] = psi[t+1, states[t+1]] + return states + def likelihood_multi(self, X): return np.array([self.likelihood(x) for x in X]) @@ -245,6 +274,10 @@ def fake_signal(init=simple_init): L = 
hmm.log_likelihood_multi(signals).sum() print("LL for actual params:", L) + # print most likely state sequence + print("Most likely state sequence for initial observation:") + print(hmm.get_state_sequence(signals[0])) + if __name__ == '__main__': # real_signal() # will break fake_signal(init=simple_init) From 2178b9c2c178555a2cf8903dcf11faadaf4ac310 Mon Sep 17 00:00:00 2001 From: Mac User Date: Fri, 16 Feb 2018 14:59:15 -0500 Subject: [PATCH 016/329] add log version too --- hmm_class/hmmc_scaled_concat_diag.py | 44 ++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/hmm_class/hmmc_scaled_concat_diag.py b/hmm_class/hmmc_scaled_concat_diag.py index 3ae4edf6..692c1782 100644 --- a/hmm_class/hmmc_scaled_concat_diag.py +++ b/hmm_class/hmmc_scaled_concat_diag.py @@ -195,6 +195,42 @@ def log_likelihood(self, x): alpha[t] = alpha_t_prime / scale[t] return np.log(scale).sum() + def get_state_sequence(self, x): + # returns the most likely state sequence given observed sequence x + # using the Viterbi algorithm + T = len(x) + + # make the emission matrix B + logB = np.zeros((self.M, T)) + for j in range(self.M): + for t in range(T): + for k in range(self.K): + p = np.log(self.R[j,k]) + mvn.logpdf(x[t], self.mu[j,k], self.sigma[j,k]) + logB[j,t] += p + print("logB:", logB) + + # perform Viterbi as usual + delta = np.zeros((T, self.M)) + psi = np.zeros((T, self.M)) + + # smooth pi in case it is 0 + pi = self.pi + 1e-10 + pi /= pi.sum() + + delta[0] = np.log(pi) + logB[:,0] + for t in range(1, T): + for j in range(self.M): + next_delta = delta[t-1] + np.log(self.A[:,j]) + delta[t,j] = np.max(next_delta) + logB[j,t] + psi[t,j] = np.argmax(next_delta) + + # backtrack + states = np.zeros(T, dtype=np.int32) + states[T-1] = np.argmax(delta[T-1]) + for t in range(T-2, -1, -1): + states[t] = psi[t+1, states[t+1]] + return states + def log_likelihood_multi(self, X): return np.array([self.log_likelihood(x) for x in X]) @@ -247,7 +283,11 @@ def fake_signal(init=big_init): L = hmm.log_likelihood_multi(signals).sum() print("LL for actual params:", L) + # print most likely state sequence + print("Most likely state sequence for initial observation:") + print(hmm.get_state_sequence(signals[0])) + if __name__ == '__main__': - real_signal() - # fake_signal() + # real_signal() + fake_signal() From 1989dd2480462d05eb409d80fd7803a7af50c956 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Sun, 18 Feb 2018 03:32:24 -0500 Subject: [PATCH 017/329] cnn2 --- cnn_class2/content/elephant.jpg | Bin 0 -> 22114 bytes cnn_class2/content/sydney.jpg | Bin 0 -> 81172 bytes cnn_class2/extra_reading.txt | 14 + cnn_class2/fashion.py | 109 ++++++++ cnn_class2/fashion2.py | 104 +++++++ cnn_class2/make_limited_datasets.py | 39 +++ cnn_class2/ssd.py | 133 +++++++++ cnn_class2/style_transfer1.py | 167 +++++++++++ cnn_class2/style_transfer2.py | 144 ++++++++++ cnn_class2/style_transfer3.py | 132 +++++++++ cnn_class2/styles/flowercarrier.jpg | Bin 0 -> 95471 bytes cnn_class2/styles/lesdemoisellesdavignon.jpg | Bin 0 -> 181282 bytes cnn_class2/styles/monalisa.jpg | Bin 0 -> 231437 bytes cnn_class2/styles/starrynight.jpg | Bin 0 -> 34329 bytes cnn_class2/tf_resnet.py | 260 ++++++++++++++++++ cnn_class2/tf_resnet_convblock.py | 203 ++++++++++++++ cnn_class2/tf_resnet_convblock_starter.py | 35 +++ cnn_class2/tf_resnet_first_layers.py | 155 +++++++++++ cnn_class2/tf_resnet_first_layers_starter.py | 91 ++++++ cnn_class2/tf_resnet_identity_block.py | 118 ++++++++ .../tf_resnet_identity_block_starter.py | 40 +++ 
cnn_class2/use_pretrained_weights_resnet.py | 183 ++++++++++++
cnn_class2/use_pretrained_weights_vgg.py | 182 ++++++++++++
cnn_class2/util.py | 55 ++++
24 files changed, 2164 insertions(+)
create mode 100644 cnn_class2/content/elephant.jpg
create mode 100644 cnn_class2/content/sydney.jpg
create mode 100644 cnn_class2/extra_reading.txt
create mode 100644 cnn_class2/fashion.py
create mode 100644 cnn_class2/fashion2.py
create mode 100644 cnn_class2/make_limited_datasets.py
create mode 100644 cnn_class2/ssd.py
create mode 100644 cnn_class2/style_transfer1.py
create mode 100644 cnn_class2/style_transfer2.py
create mode 100644 cnn_class2/style_transfer3.py
create mode 100644 cnn_class2/styles/flowercarrier.jpg
create mode 100644 cnn_class2/styles/lesdemoisellesdavignon.jpg
create mode 100644 cnn_class2/styles/monalisa.jpg
create mode 100644 cnn_class2/styles/starrynight.jpg
create mode 100644 cnn_class2/tf_resnet.py
create mode 100644 cnn_class2/tf_resnet_convblock.py
create mode 100644 cnn_class2/tf_resnet_convblock_starter.py
create mode 100644 cnn_class2/tf_resnet_first_layers.py
create mode 100644 cnn_class2/tf_resnet_first_layers_starter.py
create mode 100644 cnn_class2/tf_resnet_identity_block.py
create mode 100644 cnn_class2/tf_resnet_identity_block_starter.py
create mode 100644 cnn_class2/use_pretrained_weights_resnet.py
create mode 100644 cnn_class2/use_pretrained_weights_vgg.py
create mode 100644 cnn_class2/util.py
diff --git a/cnn_class2/content/elephant.jpg b/cnn_class2/content/elephant.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9df55b30195c9cb55e70667156e1c81e09035d89
GIT binary patch
literal 22114
[base85-encoded binary image data omitted]
z!t-!3`C;7gwc`a?j{8X4+dVsHiioe~WT-|vg`}#NK3F*331(IK266x_0RZJs<~?gl z63~S&wU?^^1{I~F+nz2p@bkjL{e z2N~V8b{wAk)~YFNbjB$y23>1n+DKtRE*RmAa^a6po2kc4=b_0Q^Ha%w_aP*E$yLS! zi4}%8^*zRM#{iN$8i}n#s3{PPuTPh+upsbp>FN)yIdrW<_ma;eh+F1NsmK@ro!fdI zc=sNaT&#ShfhOrO_T&!OZ{R{Rp8Wgv^rg2DI&U(@>(NTK^PKg{;2u5u(Ql=qJ2Jbu zVh-aW9Qx;u*!;~a*iSE(Euds=q=m*n9dXw=%`{RhHSN4H2YH4U19%6xrjzj~VJutxXP(bRq#cYi5+T4b7vB$|6 zBpyj1HsA^15su^5st6^s1q|tc<#(eM2ad#me-MAIT#&9dcYN_U*P&rmrBIq#0#bDFyy;Czt^ta!&>K9w2q4)UZ4ZJ=^SVv$pDIN&)1e-PmG!0A)WhsKe1~Nw+ z9G*g)?cniI9YXF8_0MvtF(c-QL~We#8M~fy)31Jj6DMmHYo`|r0{qyLSDrZ;M(#2Q z+@y`YGsq*SHDm2}kRbAO>Fz)weEE@-0CUcFV2(QEdJsFtHAvZ7c;}7c+9PC0ilc*$ z2`*K8kTK4A>S`OdQ#X|MNd&CH(V>}&w{>P*pgx^2GmvV%1?Wp8*LQG~{?37vu{#Hl z@v8Cnvms;j3V#!v*INsflgxRbxRNum5RL)hDEU~N5ANe5zo599TyJhAd@ds!+=2!< zByRHaVnAneW{ABMhN@F1;@Ew2iBd3ZA*qY1b;Mv9^wX0jc|aHc^`C~jsPbdarbHqty0qNzkEb~GH0)8FrvC9!j8M7^oTGu)vB?LoY}MO)d#M$RS;qJz;4;5*KqHa( zeQAj`)Mx`KF}G;}Ao16)AB9SU$jI)kgMdhojE|;jIcz>hxde}LtgN;R7#&m~QR%_y zkEf+lxi?pgK$!|BSjR1&d;{t_n${B4--fiBcYpHuhyK*Frow6~Ozyqdv zsM&45z5tJ0 zfz)<6_pNz^rIfRvIOEG*r_-Dd=hl)qQ5m;wb~rmh10B6H_+)1lN)|7Wh&Xi(Dzm!a zZIX5ift(Bj&)`KzWn$iYwXM7;WE}Zxjy<}1pRHRE`8kj%2stC}s`k$(1E+6qtxQ6N z!?a@{@J@K>cp3FR+*R1CAw|EIqs^8DVgW44-}5;2>S|FM7cxgYK;z|P2P2PRQpa_G z14ed$Hjw16^T-@~Q_|riA3Mb%#sUequ=XUA{PXmuWCdBR?eJ1b2yB+!xGNGd^Bf<3 zzV#`NKO=dJO2tX}*bu|c`p%~A8Cmwr1>Ny5kAv_;W zPDkn1j!+|xFbR=uAx0xD{9R4~JdvK1pJ$FqSiQVvP)d+hkbmDK;0}E%Nh4vk452_J zu^JX^V}J&EJv;i*G^q*(YiR;5+`C4=4!q5@J~U= z{XME3_Pb<(8qk~_(Ro3O1w63%hjHpV9Mv|zGhE;?j?p3{TqB2GobDwUV~znM9dbqu zTfCm_P^^fJ6pMx;a2c`uNxz3ZeL9Y`0S(5pZxo_E^1kolTVe9!s5{R;Q^p5!YM|Dj zlI>%fVFXwYxK63HazQQgV+4$PWOOvBQtIb?(9W3L(loG%h`<;Oa7g2~0dNPkTCw{q z&$`z6)-&@gimtoZk}z94PaF(jfu6ad%$qP4_WH6qi9RLHd!6vt4NcTV5FBRe4z!vOz9Fkp$=OA>(J?S0M zQC2AgJ^ZyQxW`OohIs>w_dI0K(qU_y+!m}(>%H654aox%tWG+d_8c5zADOA{tt{A$ zmat*e0^yK_aJ^(JmKR(C0Jv$ zhUZ|*pEdouw{M{J$9@&G11C>(CtXc&=sUw=A+qf#u|}=Wz8n&qL2+&>Fi8hIJOJVQzPjO3iL0N5@gP`s8v5 z=hLNa3$|P&@2e-9NVaGtlxu!;1Ll-P4rDpW+QWYwjy_*r4_d2rtIg!%(dHx!mM!I$ z$5Ka3^NeGGj0|+uc1vqz@;o(ZY@#vd;y5B#2PE*Qq=B@N$pDUOq&B*KrFU~27E-`= z54b~e4^ffGIZ@kyN4sle*`r9Ld_#1%kQ>X_o<+zAvlYO>9SO%HBRuimum1pP=z7aC z-p6wbqlN{VOyGl`m=(a_4yPj>dQ!HPZxj#bylE*+AdWDZ&rI|upXb(~)6SQsOp1J| z%eV-nZ+Q3_3ONJsbBrE;3NDk-YeQXabce7J{jy0>2H4?^*%&Fu0J{N$$2lFkb*gU- zv+XLgMlpc0h9~eU2*=`S(`rzgn4&h*M!EZjGO9;RjPN-loM+y%(QgxTCAvCD#n*I> za6K|RaY;Lg%dxdLQ&K_qq52Q&MHHUrLikG;pYjru{sU3}0L*UV>OY+nQ$(83f5*T4 z3;G(h2mE`-_|ZjD0$o4vANUPfoA`c%`5GvyKu~}A30eODxZnB}M&EFMx&0`jff7Ui z03L_^-|(oc{{Z9WFZ14tDtZXH4_Q8=`PKOU0M{SK`q4#KKrl5wyK(jZ0NHB_)BZu@ z>mTx>ik1gvKjoL#`Wo}7KjYt5{{XLq{!c{|*c<*H{{WBsucV*NYi{fQK7Z|+f9Oj^ z6m$`hZ~p)vvVC5yFZ_Cs_Y?mBLNrlOi$y4Zv1r~d##6_cj_0LSBh*H)s6Quv6T#rgjA-}{gM0H8Hz+y4N^?YGiTL%+rIw* zk&4g$yRY=Fi%|Qr{{YuG{{WQ~Q(GH#IZN;L@AN~OPSiG_KXFv6Mp6BKH@AE2vP)$iy34nwI03ad00MAbV2?da%oSqg)oX*CI z+up_6h7Q8d!$-&F>2Bv_L#L{yOsA%;s7wbD5n$({bM-WLwDa(xlcE#j!+wSV6aXm5 z$p1cw3l;G}!$dFKgm`)T_rq_pgNbxmzueM4hY zb9YZ~U;n`1@1e=5>6zKNzw--g>l>R}+dI2^`)B7Dmsi&}@Y}n8xR3xS{{sv0^M8Q- zKX4Hta3Q0jqM%~@!-a(Gh4@AxLPewJM}HxwgJJGU%peeg36xJPsOrLE6a<};Sh!7K zlQIddF`xZ|_8+qUcfdmbe93MGMGWIla)H5LTFg0Rfkr7CtwHBX2JeX(PK7|#C z*2fv#+4z-Jm`zPPLs+s?u|Az@y2Tc2lrw~8%nmYL;0=YTXK*Pu01MDm8>DPG`!Uc{ z`vt*Bf*`bwER)&_38KS7XZv(#x#e_cURQ+Tl1D|LS4hY=Gv%n)DRa>N&EjXEOJ|KkH2weDZSysOuTx-mw7{zA~PAL!@ zV>gB{;6#};9)J#oS_IWL5Fkj4=c6QBLE99VR1@_jL?9mE~K z@OnC*L2yi#NgZE&lxw^c`vqFmy|1=@R!#{NUGjs@2q_(BmdG@Wjc7y2oy3B=){;af zT{%rqna|)8(DJ;jwRQ5=qbanP9NMQ*9h>5g7RjHi-f&hHI#DC!j^8WV$Fl5G_RuVw zQ8L0wcvRHyJKe{2zPn8wskZ7m8{2d@4P!@k*UI2hh&fXMmCVbTvTajG-P_KOYE>sd 
zr4bKW3Cy;)s0Fh`hew9Gs#h1oPBhrFcmyfIY}DU1azJ4fEjh1^T-dwZL1BZ}P)s|- zO=Vx2q_Qb!V!@{`&?4*4M%fJ0ZR*(KN)UIc&#j$q;a4MzGVZdVs|AlRO`kN}1CamMw^vCHcOWn`ZQ`M!=bJTw;$7oI}db8`Y74mkq8ykNQ>s9Ad8^JL**!uGZ5;(#UV69 zCYfcBGg(999tN1wiakQ=s5?SU!Jn;p7Cp)!emXTkcO=q98nHsVGRPmji4jzDccsPY z0z4!FU*k^2>h$4_Stv|tL}x^T@UZZsJve6qu>vhpO{Zb9cz#^SaUG!;pTTT8pu<9I zs_rTxGkcMbMSp{^O&SX`tR&bW(=Go*M}a?NlFBoLtTVzuUE89lUL)iv#kNgBWQxYJ zkZnEybQE$&!k0Ewcqttr_W5%zr1$1@JqB%?LdE}$mhdU=yV1YTeYmwAclt>W(VWf!ckkZkV+{K)S`CRT+l!W&d9~ib(cds zv(Uy~A6e?_B@|~CN_4T1qKq(i(gl;k$>Q@1wHN7pgehN{-w@{29wGDq6-g9|b)y4>97M$2)28Oo76Xb6%8WU?TqkPTvziX-ud(Wp!E7!5THd zb~Ah99{PD%c{Hu99U;d(ge#FJjzYfB#XPm^V*6*E*HEmi3}rRa5`=?Va2;h+OScwf z=J`;CKBS#0GGxZf5NDp?Sb|y>=HU}KU4f9;Rf*$92n!S5Xu(a8&ke2tDjPPI`h4h zgcSWfRIc4lsjSnePpI6`T(fu)5gVp=kA`^X2A`}cRS$HJHh_-bHmWccl*YvMV74&D z?p{hUD6CIC-X43xr3!z+*iWw|pcJM!U?EkTu?}U3tws1Pgnca6@hna>{4-j}L`_sS zn9}+oFU;g@xG9u1f&*cf4xwClbit}j5PLT6E$M$vNt{l zlj(d!s8CD-!wluUPAFEseclI#`Mi_@%(75-HFS1&%t+-?-t?#p9v>*CmVKUif{%9z z=KhBaC=UFe&paHMO<~rk4sGM``E0?LQD`F_3Y{MnB^W|D$@jQ+D2D3)Ts{JYpX=^@$7|C9>(?B5>;7@|W+hfM0&%!Yy+p$OPnkS|W52*O`%RFNAZ@06=CLSbqfRfMK?vSkp^A+YJQ1z}?ec)RGY%%g{6=RYR} zE(ELTdF;s=iiP%i0~^QdRLBtCL9ss(+wD*=w>5x$ORPkJ;&H<=bQ9$un0?cAp{j(l zoL2ufOZEL@ubjKA@pMaoZJ(VRX1yL+gUjCak1t7_=1?GQL+Nx^H2|GtMD+k0>2w89s*ektF%6Y~eCM%Rk0aBmcLD)fW_$y& z^C145jnZr33;oD;=IAA5#r9wm!`_RLntqY`)zWa;+xGTct#Ut%t`4FFcorT=-8vIj ziP=>kU-93zS0ameD$q+(JxzaB)93t=Wi`2tPc z29Z2$oVsWyeUyTi-y44_RL8d2gYzyO3o|g~?{%t6=HZA~PSsO|KbGS78)0O%x(NLN z_EAdh2h;+)o&ng12t%EB*&?;C-JarjRhYpgSCfT}qU$J~;#lJP7hzu<7PbH5a;+%^ zb-_jRSqQO8*ur$tIty(}5I!3GHI4(+w4K6+@V=tbwT>scXxD{Ke!7i=u1W;uZwR#+ z$L)Uw8-QN{_8$r-|5HpDd@^hsip>rIH4+_uWma#HLFO!uyZ3pbbKCK;fGc4Z@N=2(4{TDc~?^gbs)yfGRW~Uh9XZLLeeWO0doA zrWV1X#6DZlHA3e>7Ag1$XQW)84pwatDhMbu#=M>k7e<5=ceRbzes;(kbz1eo-%|S( z8lqg@OlRA|;zFZi;;kv&L5W!+S^CtG^)^}Zd8vGcvV69U2sS6;HX%Fmog#Qm^H|IL z{Ku=!qB~^ovfw@qJFD~?ET=USs|it^Ou{&smzV{of0}YYGX}-hS`Td3)Rg z`tsAsqWb(Vb;LWH_rTECHsn_1_s@X7hO@y^yTkTI< z83DgucY|(+iKlF-;g-MmjLG*CBVM{C zi~suLUJi^;mFD}Qydmbz$XBovb5+5pjfY?8L{1V^#Un@c3@Fd0%_q6^91rz;2GGFg zjH>@I*0+lLJRa)5{NNeu-RT!W@1~?eF>a<74}zPli`JkvONTliQG+Y3T3!Pr4*9Dh9l%IAM-V$3+ZBTJ-gIj@Qmrsu&DjzWJTrV-umaki#Lui{MAP0n2u&K zZBae3U5Br5;k9P~p?8o@{JnC|lrriGqYAyMpj#RJm^z^-E&#k_fP2pQjbe#hqRykT zuJW8OPgkd$rsT&s3YW-Fy|u4g1P$4APamd5D;#y2!e$m>;=IWeX%9m*)NdrWzEmnL<#XK1BSJ-<;gRAwn&*6Bb#}E|FqKus`{WJ_)e;FZ>h|1k0%+H%)2b zkWE=_MDSpGtUNpkYc-5IRM0kJ(Kf7EqXcIaQ}}txh!*E8f>6r71X)n`gy7fmcfAC8!BHBEW~piqvbdT@Zv7 zeZ^ioKlY6xr33^qKu^u4mcq@mY-eo6?57hm-N!EfNo|V|?F#6)Ud*ZTE}xI&ues{w zb~7*X66G@h4`y6zs;;{I;_HXgL-c@CboV$vUb#)*hTdG_*R#i5&P;Q46dMrqA6djQ+9!pgvhTkp{{h0b8nGMXyih?`S6%0tL8!rz>zLx&#wC zl=+4z+RJx=9s zrr1zo1BDcyoAMmh70G5E#~GxFrlx3Ax~fzK_mwrYsk$*1VDJig*N6)PgIsJ^tfEJL zhR5^%?-f=hLx-w5$1tv&0!1(13(zYHK=Om}RizAfk6!17Im?MBn0I-lq%v_nzMM+) zg(|0!3HZ@doXSF9l4KdIug4qldk%IG&xS5!My%DH#R^dB?=$AmY(|{!Yxw z0)c(Wcg-}2mw-wagTa@N?sY&bw}Hu4$7Qoax7$G5OSeZ?qT46MT+7}D(v=$J-iEks zO7&=NUXWh08k0KeD9SJ99%N%dPIM#{H1EORaMFW;C(dY%xz~XbXD|3pt;1o)0pB;g zu0xj%P_xmV0kf-)7lH}=p^6BXcShq}ANX6Er?JklaQ7zZ2aCQp{tj&<`Geowj_~M1 z?6-rF!L5M^E@rW0v1fo*)yBbzS>}hw8BmW%c*wUxw|f zmlU>TOUy+jd~^6VQ1#Q`{VKZ3XMiM9j3aS+r%bV5jRS!zlg&ozk-F;@IW$zi;IRy! 
zw%BiD7masL4`y3($asF7AI3R3`|c>7?rg}!`RkN;2lkkq<<@l4Pfd@G`$x6hiObyEh+p@sJlnseL#vI80ox>B-00|=vB86_%e+O@S24K=g z)k&d``AOMm)-{^jPAxt?8GK<9vtDa(^Fjq|x2bnNu{{IA26|5S^Q}Z!ls))-@;-`G z(fZl%lt2S!DJivHr1KmZ-#RLh7E`x_ldG!Y?M>S25UfJqb4(uB4|IsggOC2>^&-=V zR9>+&xP%IXR(9<-v4wDCbn}Ty6S{}%$-^g_tMb~b>#-3fFx7@E9s%kiNO7%S)fk_B z{}8g*w^fBts!M?qKqD8P3azrN4N5XH)hS)eP*iPh$gkM|YV{H4h zF{bQ*D0D#v!Nx-EMEoX3B*LnL6~yf;$+G*fT0*c^$z?0BQ{$%LM=XJp=Sr` zHLxtPPbrA%iN2YiymHu;3eX511S zp*XsZ+reLTMn0uPawGCY8}!&(#N8;g?2%nZ)f-Tt2-hlxxB=q>JLK#JA&TjOf9VTh z27+!K?m&@T#B~vQ;q`@57wGW%zp7TG+9b)IF8I0#0U~r@EKjQX-sZ~{-_#qR7CmPA z*|#gH43xXM@7~xL;jL^xsLo1To7aS=zo;y>efTXHf33W~ zS-+}^ zy_{sa_j1_cRK&XhmRbg1rc9~j!J1@ACh59bJ3@tn!N(st@DD${DnN5n+1VNo(-+Zi zCpc}dER)JB@Ih}Xw-f3f4Gp2eMZ`9jJZWX~jcnt;4Z!V8Uox@muwd#N+cR}p4%YsCUyDd-|zBRd3 z$mY)8qIXlvGr%%`_(F=S<#Ag^W^e@_hY~p0F8py}<&1e+RbU5WJH0&6e%Yws!;BC~ zxP#`!FViAqyvHwNkjov~$quzZ`u<-}gETymEdwui=LW6(2Roe-NF}%8gM1(UPBTVH z$@ChrzSogtF9Caf2{ymJ$4!zcF?=}Sfoxwr0~n9aOfdbe2~Eg)U!yu-mJ>kcB_2aJ zWCrI{F=R+C9|{KHX*&@z7guDWKV^vLVaB#uPX`CKUQ4?zy`|3pdQviK7Xo9~`2Gg- zx6V<`HvBK7nN{2{#P0RpKltjGjg+5167_+DDh`L?vei5-r=4YKB2RxB=@SPB_}vqQ zSkF^smPzg~&eGHi*SSgPvevLBj?TfGxdu5M-gSNunVXUPtBE^;@s+S(YUj1!SVkV+ z?Sb?(vBxnNR6(n}jiI0$jezlcm9EMqv)$9(SMT1DeO9UOy-UmG-sJESk&C|2+(YEbe_uQlFCQDraEvZm+=iI?gZk%w6 z1$1-(Eyp?Mp-jl@-=}kGAIA)c$5fB@4W6tXwREpD{nYf1=AS-~e&k$Aue{COW*exJ zu|>}zYYX5W&qLJp7E-t=HEkOQ>$S-iieXC2wvCZ$_d4}Z*ok_5bO`!_v0j1rL<7*+ zNGM#K8gobLaxxZ4C1OZf}diH85JdhJDsfz*Mi1E6Xe)vHsOt&eaIl}OW_>(qWsrVn41?I3*0=H%f(n;Ii)J$n})yL(hGgEXRMHb=sz zhU$M0LQHsQcUKy-|ByjWH5n#^7?MwD@?&`x^;$%sL=$7A^w;3+`My`AxlWnMb{XfKmiyO@R_cC7WMD=hbt#lnAwTgeJs z+k&QEoeZX0|20|&1e35VkV-d+7Um#|_YARxlhApUiM;+emnt&AO8^E`&2kk{OTbX8 zWBvd@`pUz;tTQTQNp}I-YzdF-o~SOKmnES>lwp94h{_8)BxVXheLsAR@hVe6sYBD7 z7?U6(l);t{&1ZQb;GzP^K}t3a+Hj6*$Ai27R4#%?Jnd+8c)!>fHd0REsF(dMs}!i) zIch1XA+EvRVmzCRo$5STX^GN9TBr@L`jP1@Z1lGN{W9s)wi3jxi|cd;G zLBP&tes<*>q7~tDgTb=du$Fw)h@3(UngRAx3y1j!q;G@hMN`?2w)wp3)ffC*wo8`& zbf!6RzK~Hr-jKfEXZR63cz;=pdY;nlz(|b0 z{g4{ZiM}tVEn&of6iDfu@yX#hjvyLn$`7SXS!-k{IsXlMHCyfXIBKz0K>Ld4nZb~J#%-*R{mTG zJU>qRwrsm`=DRLP^gamLB%OB`?oZNfoM7J%Z-B49duJ8c3^u?kwzszvf zpS@lV+*=`NRj!!NCxDH*^JIE!CAxq4(qQ}4Cvr3Nbt(LQgAOU{;w+yEyfeFG=H7r; z^VH0^Ewex~THYEcf?>PRk3~Ctfn6iYrMhrYCiChBk=xWiyu3?a1yw$9N-<(94~jBU zt@`mkg-U;d`oF!Gh4<~8O}{cM{#^Qgky?b}V|~^VbJ~3W>S@zF?O3*oBPtbbs;~Zh z5Sp-`A8&*y?Tepf+r8O=nwf7qC36AUdJuB7)`4*=eexFj|2AzOS6N zoh#^Q5K-lY2I%u&yrb_FD)6$T*@n))<8owfOH+(sNljvPBP0$6J_GLBi}cjKg&F6| zmDV3dz1C?!6b5r9sw*?z(X&s+w(2dL0ew6W`8c9p&QOfWuQcxKk0>D4{)WaaP%vlq z>M*2vSq6XgWSWW1tmJ+yoRY!sq)Tuv7^GD$&b@qNWIu93hN)3wFM*xgVhpZbYLwS=v0?l|zTd&}>LGuPh(OWNWV?NVphz?nk2VIsDIn5+8eTm+aD3UheIa=f>M%8s<7s>*e9(ZnMGO)10@1pE&<*@$yOec$LiB zw?L|&s<|K^N>Ts!iyz`m-+g@sM8v?Y?T(1cQobmrrnI!m>_3bMbcWug<(m9# zBY+gYthZ5%V_@ej_vKzGGY_<;DM6HBs%TPkJvX?~*->@6e*W-LGRrh~LNyvc!*`L9 zbYE_y1wL4Q>NyMfctPf8$D_TnSMc=u!mVh)g)hkC@9GuFzV7(18R|MSUN|dLbIezJEmDlKT7Y7ePKWN*7<<(5c|{#-@RxSGH1B>YX$xr%fejG?`^P z_#m#cEVr0PyL%;N@{)~(I9IpCBe6Xt%rf7}@J-*z9diq2)Tyc?kR{M#Pnqwh@~1f} zP`KmABHA~Th@J=$z;zIc6ssO(Ow$8&kBOugYIzAo0T!r2#~7$fsM)(TwMbZ|)N2tD zoUu!)6g#kvWvWlYj=FS*By#$tJifh2`JQ@h+_ZaKuYuH_i%%JLmPpQJVa_|oK1!@j zy%x4EAY#am0Ln5cq4!q^%z%j0gjAO$s3%$ruIe>WV%@#w2x8!yqg$o95FiR2MGS+y zkK|$y&CI{Hn1Lzoh{iC{LOd@^@qCsR*C(yKjVju7rJ)SvEcpx!Nze`si8E%zqz@K= z$#L->8-8-#avIjo3@;;@epwuETV5hx%&h>5n$@IH$gLJQb)Rh?Nhb<+AKRkML~a*- zEN?|ymm%@jCA;JK;`iO~8DP^LsxJD_skt-$<}T`tf9mm2xIX5_4St^_rM|kTukD?s z?zr^k&z^lLxYsMPmpsBkp~tIdGsb3G|J8jUPVp&5XhE99wR?czQ5rBwlV0vd86|D+ zAZxds7^dj3F45u#t&KCdOaz`SX$V=X@FPaYo<1ds9Z=@t-%*^n)TEX8V}qib<0`*;lls9_g#%db(D6w6`k-w&!m=rsJg#b 
zoIkL5C@sN8#c{$bamLQ9O!yeGoGASrRNSq8Yi;%y^7Ye~MvpJY#I#^X<{ugCh|NtZ zSs(SIK)Jb_z$+ z;F1rJteu*D288kn$;9P};aa8l>Waxj-#UUdwM-3qU1Y@X;#X7bO1)Ja^to6&+Up~> zHnVXhdw@PxZ;{;O?mMl{vf$D3^?4WG=qi6m7Ec6GO(o)r?s2awYuZs2h3A5~HX54q z=F#)P+pRAC7lGS+A~l<=ZxS0A8551kTiBRtTZEy3ALlR0di}NT8NKUJdQo=gzP2H1 zE;Z-yO#)*`zq^!DOp)v&zi7XMob} zytimo^MLQ(&rI5NNTmMIZ9C&{SU&u&f$?l}uJrn;g)zD;`gBn4vG8 zMnlWQvitN5SP&$lJ$b0Rz8Ux}DM%J0q+8bTIW^+bM}~Ws2?g>S_1K85KSu}gT(H1AuCtSTUp(x<((p3K zVnHxlX%>L{ChX*1B6Vrs3hmJ##&W!WIL$S%BXcRdEw`hbAoYpkf$c1?ZGFBD52NQM z-O#Zrv}V2-_)!^LIFw)+K?Pv0BnZy(wjf@fAwk0ZF~?N}N$E}%oI#J|{f z2}Vklw%w<*$%!AgDy0dl;O!aULLFBuk6yw_8>%_F33@A2O zrtf=;2erCRon9RWkdgd(>1Ogqo;Qiv?Q24zlNMDLykwoVKQ?HcrZV8i9wPOPFYKXB zwgZFxm}FQCzEE1S@hqu`4WO`+Oax9n1BmOuzT-XZRHuhiirYHO4AoF-$+r<_N zNqjAVj;}yHqMeoK>0Nh=APpNAXUDC$C8}c;rkG5U_drSAMy4M&CFFGL?cY2rG3AEe zrH;SI#n)$tDeQI36Kr@aGliNV9|#y5+xz+orRmp*C69*DSg=w7X}y@XyfrcXrrYgQNr|nOQ(bEMeYx~8y=XqC7#E0a+u!2{ej;oykub%-QThsl? z*g8ChDZwAjWLXd?X%$0 zPkWl!kiqx0c@*Kn8u(cG4mR6bbDc;IcPWX7KSiGeHD9^p75%mykCRjgo4KUX37WLt zOMV9MOzCY+Cl8VjP;xXxe#9{S!?0PI&uOjfTr(miU1u8(o2WWObD%H{nIfju*iQ8<=KUJO^mnE&)gRHL7tG>EU6p$ulg>x^Ixb`bA z8xF%=-|<68A94Q{_jW7nG08uJ>h+=jK^}_0FO+@pK+hT07-1coK-w-GObz^H$0hiA zmqAlY0~$V&$SU>nN95cB*4Z6*tNt*AlWGy<~C_2B%rX*W%M34H!o*cX%X+dQ$JgR9%}=HIdKZu8E41-ki}#g3xVU?hV5z@pY3!dQ2Uy9z{pCJwFN0T3`A(Ir`;}9VV$vf2 zX`DWu)vk7}*X?h-h7TA_N4SEGE-zPlD7iVfrB=Eo3@DxP?RBu6{KbA6`fxl0oS5Bs zpej91V)zsp{A_Yo$c%?6X)8R=Pbx>XQ2dV1>4}#Fhb!P~XMl-fy}|XqyMJ3`4csaxziGkL`|G$z(M7!oXKvYkSJ|-`6NX9(5KJ5 zFw5l*bkxUYDrM)676xqpyPX64m=Yz{lVaP&lH)Ce3qqU({LGdC-BZH=eg$pg>lFEE z3rOgmsfivbi@M>W%;vCneoU$G)fwTDYui22Twvp1zrbL_XJbN|%ppUZifsCnV7vJ- zz7ugO@@PBG{2Z6+kD`pT6gvdb0d*s~ftI9PBPv28Jo5bNchlME>=2uUI4i2K54399 zVkO}>P@GY!u0bV<$+*luNmij2G^#=WPvZBWkf^{R5i6aM@IgW8=@O}MNw)9J(W5U| z6VCd3uM*V*gQK>4*UQ%I)atatseyb`y?S~DEh5B9+<1kz4~vlbo50(vo#VjT-$D`h zjBi*_G0v}3PdaQawp&>@UiBU1qroG(i38gMa>KYcd5UHSpDcf0)LPHTZ6MX;shmXZ zSipCJN@2!^+z;<+ep@?xV>_U2;!pMVy|yq#pJjE~z!2MRmw#W+0@?{{o2!p@hFk6z zr0P`1ByAu3SiED@{gtY!ZSFm3rJANVXiY~&5R~it=}(+s;%UQCA=Jdg-|PZ3rfT^# z9yG}`jVEjxr;49}pa&mDMxi99e<_hIAM%O`zi?c^{nE&R!C?czaHk zlWUxT!Op9?FTOGGZO`#7{`12^Gwy{ql}I+UQ#pfNI4;%8DQD5p?RDlD} zK*1F9((Y}v^O4LWdtNaI?_1{@+DB$u7VjhsjTkxQPzRE(hT-r+{xqC9A!OaWl{OAb}lL| z*0`7-aMmGjxhD2z%M;*?J6)aBzBzZecXMsg4oOe>S(4weE?{%#V$^g#M-5hr!@StR zRU5xp+9%*>lC$k7wGk^?%}vSo;?rcr93@1o%a((T?SeU%l`$S)pb=R7YzBX0PJuKM z2?R7V*ItLu09R-0)AgAO(;?u~NG;m@HOhThzufA*4MxgQ6tIo0Tt<0#E%< zTpyKGWquvsaB1XFkN!yzyy^>d-WQ-nZ1RF=$8}AE4|nzKx?H?NMVhtM?Wup6+Y`nG zE-3>; zEDJELzF7y3?597upmkW~?D=DF;V_Us)ezQOebCQRNAA2*#vgDjGh-=vwPzyiFjZ*% z%HbK1#8;yBmU7E%wq&FOcVTg#7O$|y8?R-YKf$;Uzgx2JzTYZ|Pi94lroVj(zC{|9 z$1%a&V!}8U-BZs((iHzj;Db=FkA*GH24e^@+xU{U@9jxghvj`f&OQxkQ+%JFkz(Rv zYN(z7>4!xjQ2#+p5Kh@mB?!JkQ|!6r@NU|W++Wza)ljw6$5A=|rZqcUAwled(LJsx z4kPzVrP|M_Pwnv+61KW!8WNTJ1C;CPQWCmwqzz7-(nFitw5typ3Ol_dnFVMtX3Z;B zFHm_%f*eO+;6xxHoK#BS{jrDc_cwi6B1y+q+GKQW=egc{zkjc~&q^HDou*T`U#I7nK;h6Fq(6+6>XF%fSbUC}mR7Z2b`|p7xzAQ~>+w}-=LhfK3 z<8iJH)h2op#+-H~Bnf@ZmL1f5&USsT*Ux~O;x9$E=~;E{ZI-35_qO94Mei>9rvT%x zh8tJ?TYNW80$g`z4b!8|I-4i#9{d~f!F6#mf@|eTi?>ecfwt-&U{*@YUQPGmDel{K zTEfRouB!o_!W0jxY|hwbLO*_bgYfuB2^jjV_>(t(f#lilH(X_0^hZG>t}dlH_MYd@ zfK8o~=`Qnq`N_{(G8fV!e;)V=qFR;B3+>~WD|JwlC$ScZ5p_VoQ_HQa6)o&LpM8*V(eqjm7uKNN?oFQ=eU1=U~b)0y3E;~y8 z?eRc|uJO*7!?kSPDJ!mwbgij`ve@@uNJR>9E<6WHk2wz=ESRa}OQzDE0TMM^zu>ld zJwr}6+YLhl`=;UL@4b^1t7dBF+owuaWYa}1(5zP|ORYueh!T8>^dUQSuKCE{a88Xn z{oH`I5ChulF3zUEoVH_og5lkx->B;b9~2@S62C4e`gvM37$(V|&ktD!KxrGF>K4G{ z6*BA8y`-;b$gpMc@S~u4`}`1b*iTf8_vOUD=SE^>Z}Ay!o&{Zw>%~| zyaiG<9XDe9Eqt5VYQ6?_ry|f$URhnPG-yO-!(_OvGA+%=-tga 
zdP>OA+QLce(3&jE0-|g%B*U>ssi{}{zJ12Fx73m_D92-i{J_ji)Be08Xo40gDi%r! zc?Jj+!fNAb$?1}xYC`$EOVqLGDmt;=JOh4SO9ikS9COHch)Et9d)(>Tn2RRX_mKmF zfoRkR#fR(46@dfw+xlm<8bsNQY9kJ0c`6r99iLssrx@zv!^-KaTLvO}{c1VAPnTaz zkj2*-<+hlfCQnIFZj$4YSC@+BS50!NZWzpy${s-RqF)XXoH%uaN9w|}=AXtIB4&4S zC~Z@0MOHX%+sWlI7Qpce{2PDsdGCvUy?L^XZyA`aYcRTFymG9AAt{^2Ur_$=)?5ag zyRKCu&H}OXW0+ytfqM+aQ)X+>H|7S2?AE-6+TLVsG` zm(|y3<_|4Dp=Mml%dmj8gMRiNp)B1 z=51x1yi6&FkWpsHunu^h0sI%bbYT4BLF=>#lmRpQg^OthhlB6~E({UJ)glr%)G=Bo zc}e%#Hf@W@Id+G7Ka_3SP+%00GtzxB=pj=3;To3g;O?`wKN*#>IvJ?+*IW}rXd(s_ z1~|0+G|VgDN=R+!e-l3 zuSXnq*HJ%^?Sujl|2VQ{F7_=wlwZ@#SFF1mX)<>*g$jgdgW~n^@T;%1QJ|C~+OC<< zuXqC?S-!nvE~UM9NXnvz0#UBUFEHuubov8qcaZqAwAp)TQknoC<6M3%FaE(tW0Mi) zvE9MJ8;4Tt>=~QIw$qz>Tk6L3X#F`B&i=wx!E?Qkc7c&v`+@D<*!yXeePs;c*s=$+ znc9Zdtex(cIq%->Nz{-R(M9Hx1+F(Dr4AEVS>|}0n}R>Cm9)&<>s(Z;*aIXV7~&*W7TqFJlqbC;}|E{qXix# zXsp08t-Os>TW=w-@k7-75KnG{H`36omlwxgeu2~PRkbB$eM1TNw9O-Tfv6na&~byL zRlx4J1!uBAzfO604sqMj#Eq%45(buG8VhlXhyH#GO~6@@cA}>d1y4o$FryvmHv^b{ z#vxh>+9xg$yW}RHg&VpnEWKgc<95NoNSjQ6CDp#OzHqxXGi`-J;YE%mUW*qRL!w{#Kyc{xXD_`-g?dfMV;|cur17x%u^ury$!l=ix!H})Ell8P$mawu) zO}OXU@I67~0&r3jo6;ucxap#PkO4doiZZykxp5yD?yA7J4nO0rNWYh}OVjmsSDCTa zv3bh&Z;*zlpSlAFTOqd&Gs8O+eM91vaLm;ah7Az=UdjHrE8J(z#6e|RAJlknujpCQ zRrk#nyfR{|1Ep9%{UFLnzCnIuN_OwNXl&|TxK?Ckni<&TGC~S}^Dt7FNoXEVD`%#B z;AU6fLHs(2C8d%jd5fvi<$UBXB%s4W6M2>YLS=JH!~g7FSGmNYgX#(Ay$Q7T+Dpf% z_f*4@iJd{}o|89?PmfUI>R_^=;O)><+Mdkd#dlqu?5J~bvk+Qr&j5!t?&2EPbstG; zti&0fO6iaOYHR9Fp7LQ0lnqV;a(MwS%s#bqI%Jaa#*n$aVIj>xbi-c#F9$j5gGo-d&m6REZ0Huia`^ zb($Sok|IEZcii*ri|2o;i*?cn(r9U%D(AUEac7iRRIhuJ6x!*(RexpSY9{i)^p=*u z-z=Xkx#az-z>A>SPd)hgnmZ&nBY}8R4-RW(wd45SFBX=MFJ4bOmJ3sw+iluS4pWtG zEz#1Aj0paErIisicII^`CDjn#YU23~9$}(jH0`ll5ZA?3engkLhGGF|dFus+HC_%% z{g^6CqFlfElG5f+3Mp^!lbjK>*F6Kt=aZ5I1=rG zBu??D`%9rJyLa6fS#vvp?y$pEqTiCZA$R(NUdve1k^~h?do)Z_$F(Mg$gk!X<)eA4 zZZrH#?tAF2S%t4EJN?*UYu7o99nE$1V4lC<DArimlVSGjvUOv&E`x3FgoTf zG>A`CM0Y6@(r5tdnLR(jq2>DS!@%2@XoI|T%ROnEcUeE&Vx+Nbe*|NtxnPdTY|>>& z$3k{<9?>9MiS4KvX-j$>vt!dO?+A1Gu+nY?Gdps6@7xcfj2A z>Zn5DX)(a8xK46$K@T!KkYrse<&gxhHf>IE&8kCw^<>8g`8kPVBl|BGLVZcBv3PyV zHse%S^0NHrn~MC6;0Y=q&M@7mrAKh0s#~qW1OECKyg(KPskm&%#Q*6M|v0cD~QI#V<2m& zzs_TB-O86dp7xf!h4}|LbTCtOT^nEbHwI-~@L$hGG06=vrnw<6(likxOMQvcqV@e( zR2NJ7jN~WF9XjEuvE#i<_K>McQe9VZp93Dk@2%fkombaj4Pq2~X31F#d@XM|4k8o4 z-eFe$g>a0eyFjUfeYc;lzbm=4O@qN5qj}Qe*c^iOm3N1+I7|FC$sS~o@o*G}p-9z} zm>jPwk_j^|wS^Zd;6b9`Zq+QI{+Y3DYK$NFpAwD7!=fv`k&19wbtlv)=NpJh7>0pLxTQPa9zk_p9JCI}M|B zK9T?|YxSyM2X~}<)kZuN;gFtqhG#ac7?Tv&dm<)LQ0Kl5bthR((!# zLly2Pa-2SH3x?Br&qVXW)s#c7e)gQ9n&8Qg+vJ{(Yu+oZ@a#Z_T_A&*ux5oJS}uGh z`}(~f1n3S90gvrQPedBBDhnN9+_e?{^VqksWOtupj^3`zrAe0izf9e%yI21aRB$a> z#_0o*)`e>h<&(G4e7Pt*kVOYnKGX%7wqfiX8Fzfy&e%`GjbMDd=RZcx9PV(xQgHYFls#yD*qb27MghS+C$LOPrzvt!zy`!V~Jw5R{* z`)xY7Os)_b{?kjqsQy~eC(kN2C!akXHowMFPbD#i-=ZXeWb#Y1uO!y{pE6r5w)toe~L zapvv^oUrLhKakPLO3Sc<2;2$ib6t(I&Wcym^pDuC>Ge%(#WpDr4?I>90)=0gE;pHLw6LfWel6^qo(L8>mMg1Ui=-^3GKG@b zH=VYXTy0jy)y@zceq)P_mM*KEn@wNtN9y<{zNbr*MXkQ`V^=ov>gVUMmA|rvHpAh0 zi63Na-~uEg2cZ4Y2L}jT*Cnm!u)d!ZiKOl^_)AtQ(Rgc0P`OP+5x&yr_j&Y;Fns*Q zOB{MmxA6{bQVmZ{)TGg*x72KY*LbaGu3J2>_7S-5Heevz5{v>Q030s-1Ll4x)Vx2b zs$4y`jMwMEI>azbYpL9$h{QUWaEozftoa4EGnF8cetdH@r_FD7?qBdu#+pfAvGhOe z%$s~4t|ykz?bVfhwo-+0VQ0ZoNgqs{dY-lGyM3$5{{Ux8_SF0}uS{b|g6eR@sT(T* ztEN~g@}L929Dp;|0=-+_zS|Ec?4#&>W+h&<{a@xsw-oVi?lKXGJ;1CB{{S86)*7Q} z@?0^xLx)6(h)y%f#yWaei+KCt&Yk0r7T@b)<3|GPXBU>N^8Bc&^ALyoymQkORmIY! 
z@ktuc!OpX@)6Dh(C?dE|82I&LzLQD+0EBPE7>Y>0)2yzPB)N)5Br965#!zv%^&Iv+ zYRku3@jr+uKGfb%0^$+5{F|S>lx80(KD|11uOBj#jWr(UepdwRHu@u8 z@5grYTFo8Cm*TMkuHzB7w`_!lIYRRS{{YWIzk&6xZ}wHx+UMdY##?3|>UER>&r&?= zumR(qoofjAe%0-+B!^9%V=%g@f^@+xX8;F3XE+>VImaJ>^Unv`U+Dfi@#eRuLw2(! zxqlfkT--$znDh2;5weEE9FNDQL9a6fDs&Y}me$Mj+g5s*xX%!wO}+2>c0TX~(5@@x zOWzXecaey0KHYA7L@G7;e|O=c8Tbrg)ZTm z;XDEHD&NO~{y42#&f>sCv&PbI!#?sba5%z_eMuPL7VHGIcVxJ@yE5+JX+sQEKR4c& z&F`953l{?lUDZ%aN1H{(WL(Il3^7#%*`*Uw<_i}00ZWmdDyN%H5>nhnHC4&XeP&meWj2cKi?DZ^UjiX>iT@{aGnGGue?9-yFj+^uA!!CI*HO6OMkXQG)`s#iQUNq__)I! zG0DL-_4S?Xch+!R#0ZXP)NvXGm*t)_#fkrmEoB#*Ie-~nSFaIor2#^^J0e^v-73F+@}b@kPqHm zoQ!iH7j9r*5@|%-i+hOTW=}6j`%HxGkf`0UgA9T4p2YU90!THTW?N^ne-k~V!C;Bw zvbTo}tN|NyxDrk|z~tkBUnz*IQl&{ny*_r|%IDExsMCcw%G&<`UC(Lpzr}qE#4u_X zo*VHz_WHbu7>*l_9%QwJSr-nVr_M*-BX{wf;{vsOBjTmjZ7gl>8J|*;)ZDu*tdPda z7$GBgM_nTGvJrr4c1XLIcHxH`8H++cqi0^Eg>NXM9Kry<2mCVwdvjgSz_@< zo?aWI{{V>`iXQUtO(7p^EHZ$Qyf$}d=FhcW4!qS!(^9&9K3-`Ni^J3QQlBop{{Z2b zpAj`Sx7Maq)mquo*@%d0*Kh^nua_DebB;mJrD9y`7dHlI{@eDL*{4#5<5!pw@(f@Q zr@sS^!@V9L@v+r4e-K9Zjbe29=CWAMwKQRZBAxzPMsc0Tq2nI8s#jW{n=1iz6xnIV z&2y;D{m<^G2d~r9y?pg`C0c6TU*>zXIb9e(CHWlR!<{Z$j|j&kntakU*P4Wm@1osI zS(@{CB)qu$!#F(TbIO|RE-YTx!M-VGwYjy8w9De{9-9r2X4!JePtMHExa4GxNhjuN z;{F?HOFx7ySn07l8;kRl(k~ya&uqR2PW+s6$Tino>Fa6WpB2a~7SX@5Z&Pv7g|nQ+ zaBvGqay@hE4nZ{L-?e_fsFGZtFVN<1{1&o!pTV9NxtmeAx06rPZ6?3ETbO5MD?cUT zkixCW#?z6(JPPM@KN!K`E6qjW((lTf7O`PGa*PzX1 z7M1aX!H;dG%WV*~(-I~76DAXJR{i91G+!^62?#UD=LCb|-VD<<&mVZM`$)395=Qad z`Ohr25~N56arF?>b2yw~7{ zYbf-&L?#(t84QgoHb{8gzyo3GGJUIi;t%Z6;opcJDY5Y-%3i~)+qyGJ1?WX}LZwCu zFym?LGr0597PLESPk|o{bjHziiwldp`-eA^-#xyR_Y4ur$Uq7PKp0$QQL=dqPMx2{ z%~JX$KiN7033qG_;xr6YF;Z|MVUC>%{x0Jc(}#+~Qnf7;IPnsdYPYGYS{&u$K^MkI zCyws)YBFgaT$Zt13rLnd%y*lHZLD*Fv@S;nI2Ga_v|Q4^?Mv`(FxNX)*1?=duB!u` zMsf2pkKXj?Ki#im@nY%L9~3^U095NXp}!*N6Vm zvwy~2d?t(wb@J^rh1(^Gh>YqplZ0LnpZ>jdVcpYB^na1hReS0Aqv$UX>4~RnUlDad zq{pZ|)Ygj2{h_3&xxvRQ%8r|Q;^mHZ;W~j)MfS^&7|x$+D-3=uNigte@~bG?P6!4? 
z$qVzgEoM6$fy@~cYgiPOfCt%Z&qIiDKp-XtC# z(%-|rBh~aN7fx_=$gZG_#^4Vt5XUE$q=UksDn>cZO-u0lPl=k{)rF3w<2`oDDOy)+ z*(0`QU){m6SbV??nNO~B?M(5#?zJa^Jh>A}n@UYS1-M;L!xz~SGlHumma_Rvmchc3 z0Vf<9^2xMqKF`BCWG$!oBwHJslMsD3P>K>+AoDh}u_dwwPq6f_LY$j|kNgwPs+Ti& zG=FJt1$fuQUN5oHB=J<*&7sqye>+%?DAC(^c11A5xM8w59PBw`iq`l)qUv5A@ZZ=P z)+@+mC5u8>i12JT0!Kb) zD#?(}GlJ3%;Af6&*Srg;*=hQIqL+p{CRt<{)Giwt!26rY$m6$s*J_mgoMOyeV|LN( z9}u-Ev|rf=#aC?%GDyB2yjF$ZD0i!=86%z>(z5Rgr@>t$5UNb~P4ba}{{Ry-{{R|a zjF$-Cu|JDsjpKNm!}FD7A0Y&up4`%3syD%HD+P9mZiZL>!cqRbbMIbMI(q*Ak?Bis zk@9YpI9~v2OcEA9Z?Xh$1Rvd9{{XaoeQUb#uatak;ONemr8{r2{prDyJWdbFN|TRH zc{#2VN?6Z;H5}gB$PN9LO38o^m36@-{=LO@-W87Bd~@J{lH1F)*x(lVvFC6{Cp_mL zO7EzXIbzy0JU3&zP1F%|O&&IQpG1egKZe@yNd6zCsMTx(`9Baf zHMia5-G%cfW4<@xwxaOkd2Uf_Z%kb%cq>#odC&&m2kIs>rFgF6R2zKv4dsarW?bWQ zw=tY3;i?^X#agF={uk?7pNO?D9ceHor)0lt)jl8TX)38pkaoq8j)d*p=jKD@h2lo9 z1=armYb{SvxLp@UcfZrGEF{wIF11Y#(#ly9Ngay;2l}GBzyVGh1(*^Ae4QE(4{QVR)+WCaI>YBgzGa znYes}0rneg=a5(sa(nP`)Mu6WbMXnhDdP)WZ|qTAO=EhJB!6@}ZegQyOKGRc9o$;GtH^@R>6zID*UCV6PTi^xo1Ld>?eps`YwVsL zwTZMl>EMS)jaKH?@^nddcyvfgh4}~03WB{H1|L0rW$=5(dUluakHK+1sx2fs1+SVs z(txnG1DtgsSC3#Thxm!3X&x-`J+wM^iJQZEHlLzj2`AHTR^AAte(6@O` z-ab+s^StBQ$Aq;X8#bq;>2TUyx&lmBH$vVZitM;7?tlT1FsDC&t}o(!j;WzoHTQ)+ zCSPjyk}dkm_L`LPwY!)tf-4wv_k6c-Fb-vG4!QK*1^)oE^_%x-?Il-5J1;G+?o@vK zFdJ>M+kpd-^6}3V*&5KPMa~a@UjAlY&UEUi&1`!|guE%?ZwYuyM6y|UIEXO!5pCfJ<``mD0;JvDSf3G8YvX>)IZ}=Xk z&QAva02+90VIu%dz#DRWnX{bx=koWi$biuRuIa$g zAP_N&zwxe0-&3%#xz}#>7Pd<`AyVe%3z?$; zvSTT2ByygAxZ|cyMnU}d7k7=V%iVrR{X+w$29^H+ty$`F&pp1P-@TI8Zkjr$ir~Q( zXZzcb5$Vjswq@H~w~4$%A=BCx@g>>wycw)9fNPrG*|x>09GLFl{pQ?cd8JC@DYyU+ zJ>lPs9v<-*jOWv=*1`xsvxFA+jGEj{aWJ-M#8E`IPQ)B>^7)^_51LS(kHk8Z(CLo} z)Fp&Jf;FZv3yppq_={8{DRiV9V?VzjjAlIHMQ6!6ZZW9c&dAC#))vtBkAOPfouc?t z#CIBwrEOzw`ow!~FYeF>KiQT9{_(bkJdSWW^ck;S*DNls{4aTP1*PTHv%tk##S1*D zM@Y^(&x3?Az-yM?KPf$M3je8$Cxo2(L5HG*1jRhkmkYUk>Ai z?`;gR&*I6Xgs_TjrddP$t}^=+U=n7}+!fpSGf&cOJU3|#m4}BdZIr&qv~7M%%T@W? 
zJ0)hzbCPq=k~>$BlX}0`{LfX|_WuAc@;3CXYRg%-`$dK17qDcnnQtg{1a%n)ueEsp z0K^ERJ~6xv9f91?zUd~JTRuh4Tu#27Fb7lg`KAp@OB*{mWsuw5GDjvLn8sB`3T-?d zs;!>bNn>p?^&xn#mZFJ}R zZ&1s_rNJ=iH}S}Q`hSge$5gu!T;gpKLnKT_VsJm>&J%ae%s{E9YR54TMAf zKIiM-^8U5e{>xUfpMV|+RST9$GOTm*J>}3OV3G*g++&f?TH++rm+c^?pBrL+41OAM zkMz0!0Eyd=ujA=l55PYZXer?@4&L6wEr*FbH67F%Ua4>o*hrP0MMv0oZU#xXDnU|t z$QYUxDngXpZm(}%p<0wGR7zIUUnA*-y}xZSwEO*g$<%U~n%UvDB$n!tXwVJ8L~wzVjN}kLZi6+g;r(M(xzep=)#tSu zjHU*5(rzaED=(V6N(Q@#Zfj9Rk-Oq!=`XF2!J z@z)%xRc*P-@9_B>MilA0C2Lr%qCp;!t7)+4mVz@pmn$rGh`}!Bw<@X`vc6zlk{o1k z8==i~*7yD+)4Ypm?{vu|8Cg8^k!2X?ozIfT=tt03A+K2Jdd8z{mzOqfvEIt%9`@0B zcSwoaFHYtg_}mEuoMDIun9?pUJQoGBuDxk}V{d40<}=BrtZ}N&tmuPt$UapqlFR~+ zzBYkf5vy7;Zky8qJ6uJEZA@{oRvI^_>00rR?{{0g4)jZ%lmbcGRGNc?^&=0 z$G{F5q)de4bFUnV`wQXzon_(w00-Rb-XyuU(jlHtGFk2>FgS11FO64r>~ub79cU7BTH#cK#W;*Y zgl}gr^KV_CZ2)o3NCj(*gX4S_akuuam2!-LJ2j3bMN*-;-LR6sFgrm9VIZ7}^k`z} z;v>x$_w+pKm`b#zW{*MFJY}NzUfSY)TKnv=!5^6Tjhy42nB%vvPHUCZK0J6!So6(= z<7ysD?PazpyBTa8J1RI983YC$ zJCW!s#Xbj%$MLn^m1>YlHSB5?CYcv=D*^ykAUAW+;~le_<)?_1Piav-Be!o}=X|hm zf`l5?@BT;E8W+ZW2UPz6gn&ep z86|-$#s*1K(+05q2>AZt{t|03HNcko^HA~{$z&+f+pU3&pO*wK;Ta@jll89HROY7T zeHFg;zs~0sN(%Ol{{Zl8eH9<=6QoHTZY`oA@DxXUj@ctWcL4PN01ma(Y92njveRdo zbfULvNQMdLP_hsM6(u*wjz(}AAPL}=3 zpl2D#&f{NB_`g$`^=YJ&#S08SZU_AxOBLa84hJNRegxN%ip5o;;-dHe0DyMkuuz;_ zs%me1I{2d(jwZady0y2`wHtQkKy*0ainQ-q#Di4U`h57MWEek7k4^-qZ~0?TuCEL>l}Y`C5}sSz?_`m z7UbY!d|UDt;YPJC%{JMd&D<)kmX2~0g%Gn!a6hjw?v|c+*YG~X_?~W?UDAwWT9)0Tm*uv-SZ%=(zDNY{ zM^bxZzHwe*s*6M6eLe2ujqcuif~~3Rd(nsiep8&4+Pt6kezm2hYSy~Nw}vk*t+aUN zk!}Z*sa!ftge1GxM9BM`4m$D4$gJgx+Aj%dxt~ji{UTD5T4_%y2PiTZ1TF?f4}6bG z_;)^exj5SI{QEQL)jwrwY2L=YyjG9l-BR-A9To2$8&LPg;%k*+%NAIi9mBU%*Cg|b z^bZ2s+gf;+Px3{loOEfowVEA{4cR#jv~C>!d9Hr${_-s=T4AVN3yj;7Z)>4OWx){! z3R!kA87G6rIIf?;x`f)Fiu9@ElTa%9JfFL|wu%VVzSD--sB#YroR34?)|gAG@qIP# z`keT!Xh`5>)kVIm<0TrGkUzC<&zJqKZHM}_=gUag{vhY^&0Vlz-cr5g%z=NkCbjB-{{X9wGmof0L7MJv^)_~wH-2Z& zKgw^lDSJoLU<2?Tymzh~YPKe%ch&x9ej5J556ON{v)!u(%-CBG+@nyt%ca?1VUHUV}0kjKy{2DMXXsxUzVK!4OXI~|yep6%DJ zFl&~pe$!9p{Z5D_$nN$}@;R>*cwRftjam-9Z>s63Z>0+ut}X6#@AS7|ti#K6jHx68 z%42Ui>_@5o%w7}lVthO)JUV8xHTI(F_sJ4E+(?bRRh?8R*w_o6askOFCz!O=Bh>Wy zHQNQa)GRd%aU^mE{oG(AVM8m(2*_f7Rq34b*U&x{cm^9y6HoBHz2)2%T6~Wzl1DSe zJgv0}e=$x$EXN=aI)DHGQ}(w?TgvHwnKGuqY+Dm3F?KJ!U02Ttl0wS0e zDC05+$t9vV3NqNoL0T#7n%nyBWa76^`umR%_>tnB8b6DEAGs}IYolFhw%3yCCShYC zFLxwoeB3iHLFF7B-0_20KNf%C8@u?K@bgsAG`%xg)uq+!^tHX4Qkq9=ba$DaC63`h z5rvTMkw{{3h64v%#r{3fF11VTKUeUe7W$IMrz zPTv&p=fnR11$ZCF8W)PR{ZC%|Zh$12ZDzN(*&;(6Z0a2f<8ZlE7*H@+pa9{Si>NFJVn2RM4nho(ob4{c z)_)mYweFJh5BN&Vu(N3TXYQp0ZJ0=`T;q}FI1>)o6_uxW$4~g7W$^?g?x|_5!)be^ zUTGS9sSJ@wcd%&`3D!b|L(B6iP(TN6bnkpPKjAF1xcV-bkyquk z1lubC*9^f|2dFihf2R1J*85AA2>g2Yul9zPYkT_(tD?Aw4brg)ssQ0Y+;BP5b2OXBkH~kzc%nj(tOJQ0A`S+U}XG?Ne!MzYV!-t8{zwSZ%Em3 z-~nvY+&7y39o6lv#GzmtR2D6~t_flQ=sO(x6zwRaulSr3Uh~w4<5i{Q=Z&pR<&TSs zwVC;KGp5?yXU%M|xK09|!xQz9y~f~HVSs}hO7!E`>s}r4W<41^ zUuPh<@MPAqSpD00FDxv}Z*Douu^-;R&N;7a@EyA92Usv%O>6|x2Nq{xZ;&6HNV#4y z)3?5Bx%?3@?vAU*e--qP9Dd2)7PLzQx`$El#+deZ-ehBL=4aT+gU&({K?5B*uDe-e zPlFZ!6<}q%#y{*Q9!KGlE9EZ;d5hq$+QE0pAWb$kWh{3$?`GtjV>r*}^{-&P_}2y9 z#rMOTjWSs8tVHwM-$ucsl@06?6?MQUM(hGI54V0<;rVJf(^pBS_(lGwTB@8cF1JUJ z=yz(K3B8(Y+jxrV7Fl&&p}%u=$5DgsefX}s!uo=L!e!wEI+c(Ty^AbjK3_6}@}2x- z@HjZdaXJ*md>MYm;S_zo)8=@O%loUYF~@H8-gr+_VdGB>LUkAu%=;XUo<`W270z@#@#Ccgb`SUM50LL8uRclz( z;1TGPNvUZwJ6SLhovI}L{GgGUamN|t^N*!@?k%mY@_qjR!oRP)-s`{h_nnW}qyGSe z+g+ULI(s_Y+z+)E{prld01qptT=Hx3KUdG&d16k)8&t@mH4;^rx} z3wi#AHMvk2zSGFY{Ga7rRl6*(=pKKN%jUX2+F7HF0Lzfhyn&2o9D#x=n|L?Gzh{wV z&~-?oP_9_mOMp25ledAKirnzFoi)FSJTGl?Ya%$*Bl8kih{!TVFmv=Z^iFZJcRZLv 
zSJ?Vr<3wH?wf&sDKrEo}bAzwxiofuWc$WV7EBSY4EN9Nw+vWhh3YTNbEqu4(LI%61 zZ43Uhk1kE$#5!lM<6moj*a;**vwf>OY63}hn|5=3;n-{?avxOyvZn*(NWlL9VIn6SGLFC9JB1x5&KuVl80VHroZl?#HIll?rLXyfSe=0c$ zKPvU)_9PB64m0UmP|lVz380b~ri`Z72vx_-aB>H*!0n3kZ+M=4E9!oXd^z!+mEg|_ z{4BNc4zD(mZ-3$Vrm>#l?b6C-Sd=tU?sNl(LI)#~0k1Lrp#K191kpT6rRzTq{844B zNi;V$w^AevEO3qRw!ozDpvDLzIppN?UmkeJ;ufLf3oQ#wz7e`;Ht%C^Dzg@7yvA@F zB#twIj=&DX9_RZj{>>ME8T2h!P2d+W440ntrp}@FhbI)Uo{y*2aA#jK4%>b zc+<=1VX6IMyuMqlPevHJm8a~fFQ1|6U$m#jy?a9O#;@URGgGkBbPJb^KtY3)AUaH_ywWlXmu z6=9xH0Y%;Nle}ZnwQm=B7P^Zc6od&K!7Seqr;lzU)cg5AE;eUB!{z6nAa3(t+WzF~ zpR|E9UmL7HX>CefSSHoI8$9+3v2ES{r)l!{V<&Rs*uwt+8awWJwf_KzFGu+w z3~5AM%_LLFBr!3`eXhh{H%@;Jz3bY(6Hbw6YXoGG8!O8rqW~mflD{_%j3~(T_Z8u~ zZM)n{DhRyOjum$2zAM^(4?!dvEU79;sNPi3mOvEl4i3}bg$Mox^{=dpwD&$c6u)%O zNPVKbl@=uIF zOW?h6OlDHoFc5(^=PWb8Af7?*Ul`r<3Hq5yvvBk`#Sx`ov9SwSo_+sAbr zsBEmAep!Je?@`Ms{vP?R;y(z@qt0LZ9vu?e*iUtGw|XpWp;{SLS)xQaUCiX+L2sEu z0L`B-_yuX=uZ(kep8LT1tLSk@cWWf=aV&8babs&3AH21liB^rzBy|TFCccDkgq|_d zt#vJHSJWE%=tGeDe1-2^fuG%F0+~2nxxmOB1$@m(#t)KGv$o$aL))hbO43T{ztr+C z4Qsk!&~D|A#JUPW_RBzIi@*%fp)kvZF>K3|gY*9Y2F5?~*1I1E>Uw?ehplFZQ_*kZ zV;~n6o*QjCHpGk$Ey&7Xj2}`9cdWk+!QwHdPn7t&Za&p)R4n%2YuF)u-dN}&`jAL#;hi!7frk3CNFZmnm%>Mx3 zpP%*(rRyPvr6<@{3n^Y1S(h_PvAl~Ge}p>?fZ*dG^v!((WRhu~6!5L=*II-Y*E)Ti zcJeon@du8|Lngv!Qg*C~mQ@1;dL6$$w~ZjSio!S3ENv%V-Q9T+YPa`S zkr}lsr~<5uCdGAySO&|1k?+p~IGXQ>^^Haqx72TDp7JE#eCZj$aq_m@3=ROn7|88j zdEx~Q=+`$4@U)u(4o8=a77NZ#BoEKhxarDMZV%yqq1`CPF^sos^**Gz@mGeUo+gu1 z(I3r_Ix)Jwow4?rTpS!@6Y65i)dz&+2UKs!II)vmgY#1 z9)dE3RPH;l888k`J{Q&eLvg2TX3r*DX#pr!P0Sdmz&$cP=;uAiCcEzk>i#3}?z3bp zbxYV{2rk1kp`eUqq($Tckeu1gXcj4p#}&+wVOo2Ib;$C-z+eCak^-JjOZb%>dfugPsOm6ULu{84q-!>t z9Ezza3Z8`3A-wi>63d~@I}S6t99 zVz)Xy>$J04$q-p%NW`k6V339%E^z7yJ#ca`JOlAwe-h~z4XNr<*wVATKJb!w9;mjBiSX)jTD7P3`XI!{JntaI3tSW zpqK4xx`fLtlETbYT;n8VcX7$&H#~klDaIV}lwWXFqXxNA^#-u|--$jtY2FplwF$0t z-EwH+zE-$-1KK5^-rKzS3Bo*#xpoEi6P#dlzqb#>6tMop@CKc+ZS5jmLi#iqUoe8U z@nCcSZjg?jJZH5>`w!W~*S;e0w}~%&i!DCi`##od$2VPTF$rPOOnxJ3T|wsC-?*ez!_?W)EW_*K1Mf4fhS(azyU@_v8h zzf;|RV?WwKH9bOag#IXVai!mfk4n9j$=Nac(G&bC=OsuyH@orfx+lS%H#gcYrDdfi zr=n^WSJraLZxmnJ5EDAG$UrQ9L`;<#5+Ean=kwFR9vHgOd^2^Z$1sOeS<+bL2Od;k zI3RKNvY_YbgI{}E__J2{@9-vlPs4howthd+BFMP4A~~)X9BnwtiB(ABZXI*LQBM&~ zQIeHCy88Dc4T+TFD9?ZC{{UV`o%o|%(7qPy+IN6_L8e-2(*Y0Kui%znvj&tLwhvGU zJs0Q+uUomF!9E1M(O1FN+HHiomZxWLBu#H~DVuez$tcn^GO~xvL@ax`QVAK!{Pgjs zi#$i-?L{@cUhekp@G9Gjtwu>2cL)?NS-xX{F_PPe1CzydIw!^-5NOsGW$*4T&Y!6! 
zrwA`6l?>^;ZcGqxOB~^`oE&C};o}AE6W@PRNaLk6snpqR>8JS~;qjV#{{V~9+udlA zXsxViag~Xri4oOO*{F!^O6bsy!64rJdEVmS#R-M#_ioOl$jp27~ zKgCzt{=IXkEY{FlENHVt{z@@o?xopP%VDGejt5$Y;wQ$78$S}uXQ#sj)!SZ1k85vg z8e-d5F%!FP;Q3AoB=iJVgj@V@@#D35zNrVfwD3mKGRdrR`+?CFKdE^{= zm+Z0NqvQVo8fqRi)tX7#%IV%IrW+$SPB%012QjJKPw=#nk@rn`)v9vGntHT%Qj(mM zDd@gOsq0$hzwG<)c}`S=jexCnWpVvVPYe8UFy`XL$Lv?J7n(9-a-gstyO14tEjRq&blH z0HPoDVzNF5{>$2r#Y?2VYuWVg4Pp(zNK-c_W*klI})y-!aHI zf(p{3$e>DsFkt->F z6p@Am;l5HxE96s;)#|vlc`d$Me2-h7+fz+S(PSDA>|ZiLW#Zi*#E|%hRJOenL8B&_ z1n}JLm;lB{xmfmW0z$3M0UJpnE6qGXr_`^eyxBa`N{suq0MZvMcQJhO za0v&TV4tOQUyQo3@eApiGurD~5?Wj}zNcYzJaLaCBe$Ek%{k22%N&-%@JBu;@yg!M zQ(XoPE)5PdZ$F(O7q2enl3;*IxezJy2_;t~DeM3vNaFDril6OD{kB$G@BMCTIyC4) z_;#1;w#T4+6Y!3`;ynTHyg2%-x@EqhD3WEhw{}t?R3k1)&d@QCIAVA_*G&eEuWEKn zr&u<#eScwa?;=I0+9RQmn zqA6BxGFUJoqMdrRwNTORe_L)vN)UvtDw_E(FH_xM(zUM|czeVerH$ymD=cs_$*f$H zCCYu7x1dP&mBThj>&HMVtRD@pJWoBWEpRk@FYM`b+uOTMQdhiR-Z)i`MvGu{IQPKD z2L`-W>*F2Yf||oy@YL^T;eB!gs7a^88U}Y_(HQ`3!4(Q^I8`JJF*pXfX+LV89ci}` zT-w}1G{;!uwX`7`IO7@RB8`I<+Q1SRC;;#X8)7rWySjEy@*9>GHjP`p_Urhbfv9Od zA^0b6{fTj?$7Q6;s&^m zOFs}!s}nnwF_l+*kQH;#jN=2E;r=lA=0Aw758CE8H<3WM0_MVPw$hh73k8uk?h6`5 zcL9SL=dkhp9>(WY)t1%?eCTis$kJ|^R1K+x#?j8wNXQ&@t)itg=ULucv(K$rsj66g z65ION_H7&ECEthic=Ywr^vM<@I~JaMkPn#qiN~A(^yia|lhmK3{4L^y@n`J2tX<1) zEJV=iQ5bB-?U_umP-G=a$7mkn+j71}l+3fVki z@sFDrBLmpKW?R_oyd$gJT*@MbEo)SgNnOEJRk@t8$mjqEt!GN3bu8YT&+|Q+v{S?T zPO33qdVl1NB>0VYY?loBzlYC}0yacbl^^NeNWltt3zf%FCtsJEz2WUkNbv`WpuM=f z(x=y9j`G&jSv>0h0B4TmqXMi#lBZx}gN9>+f-~h_HT|&sE8x55vXI~2O)GgOD@n^? zRhaqSVcLaG!Z0#UaoCZ+@W=M3@W;e|h$q3eaB8t?=qHPP!`sPkA&9dxp-EY`kPb4r z=ne=Xy*h4Een*iga;;XUiumWlFv;V85zKB1887Y$Q;stFzfb$@{#D9f=uuohh!9*# zi*0Fsjk+Pu>s>v*UU}*HlU@G+jpJ$jVXc<(urKCbMsPgMP;>d$E9O1L--rvDrU?z% z2{6oHeTu~X>)V{4_Onelt8A;<#SezsdR^&19O7#Sy-7S^P@vK5Z^MCb=Nm30UHf{K zauG5a335ex#h--#0JS`Cb8s}3jKkw6)O`8;FKP1y!1GpNKH_=6x+BaT1bEGP_rcgM z?nlPB^wgf({k&^1OUSgSF95^ImD=*7IRLlHNLBMUDJ;Ye_J0S7ykBi}x+6>vj9&R) z{t*D)S0ne{aGZbOW)3rtEY?`3`{`}{BsyQikM-QMe$qI>MC0U?jiWw=A+<_)lE6gzfTCOk~O#j>M019yr=LQ=W<|MRoa@RJGYh;+@95 zrud&tmr?%!griH8>5C+j_=iWc)CqGT-IR&Q$s?yx(4IN;e+Br4eLhV>2=vSA{{S}T zMIK5B_7fWL%%`7Ro;!B08P+tYf8iyxi^G<7mXiMfU`3z!K`kfJrgajGjNC}u*E@3f z`Hl`N-@YD4bF)|y`Q!Z{?DHhtnA$$)0D+wP*KbIt*yegB&m8cK?s5^zCo+m?EccVB2eK~N0oEeC^w%P)<5B_Y3pBF4#l zvjY<$%6-$k0i2#NeR1jH)Pwc;f51EACH+76CGQMgTpd2|Zoj24Na5LsM*#9N*pOp` zfHFBhg>T-SN=Wnp2Ct=)_Cg&=3z6r|$o~LH83TZN_4dVbJ{{E~Z93@>h_s(A1ScjR zl?F-66FKT|066RTR;P$3vza_QyH(R}1ePw%EM-FRaX18i^QK80XPk7eK2UDz--n<0 zf3L*pQd{5g{{Vt_a%q}`o+Z|&5?Vd0%R3Rlk$-)VKi8aO6Z|9Yo}3E!ul8fm^=)^? 
zHrhs$Y>9V&YdTw_zmOj=mdIc@%O_s=KDG7K<}dh4^|R$$y9us8)sf}`C;l#ezm0PL z0J0y#2z(c!_&>+~B=LTM;cKhyCSR~=+P(VP%_Y2u$R-qat^Whb@&08cZHyzwr(V|gvUr!@9Dlve`VFlurzP>vHSSRU+fyytgp z)qO)&)wJy{`Rx3CscH@7Y>j4}19`alSwC^KKH>alZcj?}i{FRe4z&LO6V2icPvReh zHJuLnY3;7z{?luNZ*?gSo>o;0 zaQe-i)vdhO(4n_T(&=QtRZ`r`3hg=jp}&OZ<|{0*18cA`xZCFfPao&$U#UN|4w0oz z@atR9?xM4@wbBeG6CSv9xpy0RwxbzgaJ+I8B+1JXrG9V9`*!&{ulkuWi1zkBm3w(c zBiq7HmzQ5JBa0tL*-Ae3^d<27sGi<8V2D^{%7ek`eR%%>BDCk0Wk%T7VlFoMWp;Mt z1IYgX_10#casUbw>|6V&wzim?x*l0z}H?Jx6rJ0i#fG6yfU@R$0#n5qhM_ys1Ywca!*|G z^GQ(B$U-qYkKO6l=~{Xgo|=XF+q3y5MZjs~5vQ*#cpWOL(}ikWwN=rbCn!|rgVg;- z_7r_Nd|E;f$54U3*3mp)1RW6L$q?CF1VJeL;?JZOlw z6I{i5%As$tf@@qvssZ9R21L#rlFN@>)+b1w=GOlJU0o{X=v*bN7pdX*dxB)tBg9I> z=OD`v!RxsJ<)szkzqc2IwLKHzGu}z6CC;4s>{3S@cCp)K++03dI8hLl+{zr~*m6%M zzOxNdF|P#oZ28KtZj{>SpX7WoWhsnGR!K=Co*ZNHuV(mHf97c{yQ2YBLoCu{oP*OR zkadT)C4T`qXD;)I36dVuIvo8K5=-1KT*sSoZ z{!_-g?g&RDa1@p2spHp=rxW9^5Jh)k2)4VL?L%X2&9ry}l12mo6Pz6GEA;T19pTBxG+K5;OYOp8RBSH^co(MJmy~)FU$EX&KH=eQ*!sU0=cnU0>h_#(hFT zi6PUit%NK0Tr`D=IRl@RdvjhFKZ*5CLqU^6Ht#Kpn2IDtL;RqCLv9DqW7uG0HQ>{t z=ZT$Os!IO=az3NlC}E)vjX%wtgi1IYZAz3Ifs_J%LI)d%9R5`3hSx*oS}KHac3c88 zhUxS2Jx|w>TDO*qXS;os`3QC>k8770BX%-RcTa~C5ZWe`FS5HB-hnH z5cHW3gzrtI%qyZxZdh8Ge$NBOa3o2lFNH=@L1hY1WaQ$%GCm0Y)0bZc^hWUQhl4Ej z>3-EU)OT0b4%hmp_*3?p@%M?YEp%;vP4Jb5i#r&djf-5o zXh~;cpJG{{58Soo+t3p@YU4M;vWM%38>k|^4$0yVFI~|ZK~0Ed1(&T+a@rZ zWd8u3qPqV81?u`W--j)wvA*zh_N>zElSR>Gvb8vof!PZHROdUm<+mR7tv4%f{{ZrD z@+Ma1fABxd{Oi}QAhb=gNM=MHV41KBdSr8q@O@8w^H6wOSwU@eFOw{n1{lP7MrM;T ze)9vN>-FGbx9zmMeR?#tm&vvhG=5u&NeG$bvYeJGnFHp@Jx^Nb{>=`CmX@{)9A+dj zD-s6;fb0f12OW-j4k)RJq@bXc*_0_s%bmL;Cr{UI?(GP?AL&;_yQo(Z5OJ1}9QDcj zzJsoCGxa@5zu_j+rG_?|#n><#4+_U`IP15l^y8)tRKC7jTepSxnVJOZ+4-~XbJMs# z&XdD-kxnh{XPzZ+OtFpUJAqeH z^y9aSb*-irVK^I_N0e_ngK{{|aHsR`aa>=C=e^V;@}Q06lXxo>YRs-a=>SG#$#?J(5cFeORjV3z<=oDVA86Bi zbv+MN@h^ZsWp4@iZ^N35S631cX4z#LG@(fXTe*WUz;+t~6g~} zZ^Zuqhxd@`-W0k|HRHD1BvGj#Z%{|v1Q1m6gkX#hm%#ox_@l(17Cc{lacyrltFG$T zi4T=z3*tfw$8QK6l|{lR9G*{8$FP3QT35v{*;B)FYMw9B#-Z@PRJ%6Xjf64_hp>I- zHo6EhHsyAl5+oS^0ASZWDt^f-s$TQx`u_l34yv+Mlcv^-uWv7rsqokKX4O1R;eAza zE)9jmmWeg8g$jtohk;)MA3APk9aSWWhIim|9|e48;tzx$5iOljcle(+I_b`1FU z?Hl0R8_Rot9cXhycc)#dt=!h{ZZ$TS;AK{lq?GAyk2C$L zz8%ZqFCQ)Tk0ciQ4!OGhS}+s$qXs}2DFhP8=RHHWI8k3XYxeTlS^bE(dDN*PNepGq zNh}JJ!Rdk1t$m5%oA|yue$RT&ixh_ISn$M!?8+(MBuW^@pd1{O^CAPb6q@-1RkK}F zRz(+tnXqJh6SYWVh3*GImFtjejuMqAxXIZf>q47~oEGI9D+>#ocDF~F6=EeoRV%ed z_h9?5*Clk1m3n8fdrLh&efJ0t4&=rcs8U(0BSGVWOy?}oWtR*0Tvsp_?L29+oNE(9T}b3 z*%!H(0zd?g7gblO%Qedw>d#IzT?&5dyrrT%kM?f;opmqT2gLH~I;Yy;@ZGh*`)%wa z_m?vse28)mT!WHvgai|kRa@wv+6(rJ(*6nPqriU+Qr_25vy1GKURZMumnE}gBdsD|!%A92%RVUrkff@hN@o%)T(x%qrA z;NR@$;xCIbyw>)=XVRek?Zy4;810~6yjn*-**F^irrP(X8_}R zh6|8)VB)+(!hQzucf{MNbxRA#^le7&7yR+#Z9C$V+)hxaucunKHzgWfH#5Tt;>X5Mc=0ThmDp)xFAa{M>cXr+- z((S$|{20EP&KA`!>@EKQwDjo96nO|za0%$dHVN!^4%OuU00O=vUU(;09tZIt>dJ#Dk@x_@8fdX%FYv2GeC)3ZJSvb>v4&>k2POBrrvm2O4U?$R>(gJ?KjepMs` z#zu3KnxDr}zL#UDT_w{8lKx{D4T1oXf%hD*Zk_qBLHMum>f7Px#IF+Rk992ho$f86 zkre<}+*MEmmK{JW1_JlRaoXmqXW}~~r zSN{Mb$dnqr*-ODw!{ZG`KND*=49gt8O_ig|wmD|!e65m6IN%PveJifkd_ww!;>Ow% zWiAz&&@#rGfaGM3fZ+hi`7%@xD=)xv%dgy9cz)YX*uep|^2Cy~nPm$S?aoUXKm!;h zfa7t--PgliV@kG){{T|b_010D+@!x~)0*3Jf`wz>wL?B5Uz8}|H>o3rycFwuS?s!YVojJ(TWp6d&I0uv zfg^fNBKq;6eLfpMu}=z0@=q`(H{B`%K4L};5^lymRR=!47Sa%QouIR1 z5BGos=Yi6&wOu<=y72w5DIL-}#zW07$WNJd(U&B4#(&v0+v%SQJYV1qay>&-YZqBy?5WO285qd=SJS_AH$t* zO1OyGUqH8wb0lo2?4}2h+_Pt$?pPi;KA`5lQisG=I?kCCs;0+KA2C^iMyxPS#v>SR zR4)J!21(6k>H5y2rv0Y!!tUbI=%`UOoUIg&q+yqG;(s;aQM zK7#S0{hP#A$R=qP_{$B&+`4U<7V)eM%2@dsWh}sBJ!B*t@OJ+IX8m2H_^)Ye9C~cg 
z$S&tMcCaLJIyUxJV5+-GEs`;ek&I;53GnB~mfk9BsI{>S8q{_&NgdCb98WZW3~h2i z4~?XJr<{?FmE8W$R?x}usV7Hy9!pz^*^Ha8cRH{nC>)XwGoC#w(ZM-XvRfvv_wBdj zZ-|u$#m(;|?7Z!{=YBBppTsW@{6p4kt=?&E=hZHL$uaQ#rPDNGS-xe&erx=!{E>oO zayR_lx5JMR=_lcW>i#LZ(!4dOU#0bwQ);Per^9tHDi_NVKkmXRY(Kjj_J$*YoH60+ zU4P>SzY;;AOLZo*cFi5tt>mUc9r9j9;T&L+$?|-s9V^W~A?tn_)wC@mPO^O(((20g zcD#baPriwhdZ|s~;v&v)r{=%_4WlBytwuE>pw+wSsqy^Mgq*q_tqAa(+6B#+@kfER zKkbcLT}xcCxzu#~t2^A5baDXM7z*DITS14jZW44LlRAGqDIidSM1kJ4CKy!eCG-nV0NmO9y5CWU#*7> zLrUxOJ=e!y7Q8*-?*+>RuB4YO0$N+g{{RT@ita6%IJP0*4X|c_DJKB4yI|)eE^9?C z{7bIr{vVse&Eoc!NT!fY7l^HH=6K17uIXMxi)k1th;xojd{^QvDt&v$r$*3p`>Y znk!hWY}_KY*pNnncVljM5Pfs=8s4;zRQOvxw}rfC;hzpYgU=z2b?bR<7Rj4p$m$@O zNzT>WGEc}bGmv|pT@^+$gS@+I`CR4t({&n>(DfhMpHQ2^+U>Q*y`pQkkQ)e5;_)5q z6BP{H7P+@>*x)GcLFthCtW6v~QArRr~AwC)?8%<5B7!3h?)ibgR8; z9}HReV(-JJ?1wAw``?2+P2roZBHL8abx01Ge(mKs zw2Bcss}*sEjaaZMK?j}!(`SzOANs(&x&zzJ}z+(M1f;Aa&fia_ao&Zr%<}LHjsM=6I&;p&+J{esVhxw%cPT z>TBxYo2OQ7Sv42`0FtA?ouep1`#1TXi>5Sh;Eh1xqd(dfqXr|4c{ex&70PbpFw{AQ(p&isL*49CCTZb~+lx{{RaE z!aih@g;?wfPb^^ki-DcJ`0hH_iBTv0f51E9+Wx=bm-Jm=$D~MN)nSapB3w%g5SxzQ z?+oN;q2jhR{bYGw9(lFxHaB}FWiGNWn8N1Ze81@njC483u6sbZg3m_0mOJ%{)!-j8 zsO!5q;15yFZ0nkwP>%}AHJbp!(b-IltGIox;ADK`>EES!&MRF%4?pmKUy0Rs-|_PQ z01?%#^m2H=Rf+GcCdI^ot@n-mT#g9yN&5V~x^}OGelGa;UHG5l{X<*T?_z0WklI^Y z!){h9OL(%8MG}GnMgZVoXC#r5YwoLEK}U+UWf#_NMa1K1m*zRROb_*+DIDj6^c@f8 z{-bpf@lK+_(VX4PawILEnDdj@>t9ckD^8`qFZc-ZGY{d#KkIKcW7>EZ;LIsmh%8X!-cf!fB(`d_ntJd_nPMud3b2;zhBs zy)s5D1W@c)o8{VfDJ*vNB%XR>jCps2z8cTtuMyg4x;53jcXuJxKG}T~Q4kf`iFY_& z7mSn1$E9*M(xAC3^6U?cJbq{cJvik3JJ+`U&K6o#---1vw%Yi9N=5LZF;ou6eT)Pww)2(Mx3z~5}dEEQ7^w_ZyESv8w<%lvTqvd^47s% zx76+KPn$HpQe-5A?c8D+$NaQ8#@>Fc_zn9!UVhG#>({<8({$}i#Fw#so*O9d?WK;| z*se(;NZV^U%ChgH4x8)Sy2yI|L4y`{?bIWH{l5L9e(j^=qv^#qi60;hUS?CgB({ zxVL#CXxDN1iBMzYDd;^p=R7aQ582;T_}#5(UL^1~pJSrV)&d!{d*>F9k@N2g+=k1t zuxxOObMs(&cs$;tsHs=CBVQMvp1xg<98O;vYn}_GkFd0z9S*GE^cM{r!zZn zTOT|)LOS&4u6tLZc$)tJPtbf5;v1XYe@5`cGe40Iqj_rjj7c<>%8~Ad;T#UvyO3j=`QKyhF*2>p z)#Y`ztMuQqJ%wMAf~1w*zpv`$zZ7^AMe#?B{4;ZT@h8KRYCawD1Uh8#+-b2}Jl95J zD2w zv3z7mSpfh6Uc;zr7m@hZ`DE1fn8Emi3M$mgz7O*G|&|QcrxC?QX5jfLVUy@x*fIyf=8YAoSOA$)ta0iHSPU&Uy0__j9g`> zPwT1j9h`*u>ao0sZ(P3}eFwkkT`r4rs-=)ORD(k6KR@XgY3*ZkHA} zH&+JWHt)4ti1EU*^*KL#^9Hh@Fq?SZ+%c3aYQ>%~H)pT_p4@#a?P%4E{q%gTXgID` zXlr*Dsv|Nj$$iA`oSc8(JMr4NzqH7z``G;W$vmIHmdLG3#-325i3ZScRzU0cVLJ@w zvzW`Z*}U8`2F4fyGt}XFb6QGDPUONaF|$62_#biR{>(obW#c|wlI~EW_}lxxQSV$b=dXNM1`S@NYySW@{@Ra9yK0Rqc7K~Q*H(p|QzQd($XJtu zp1r>swz_=xf!QtM+Qc^F!8z&Cstr#_OC@>M^3v)o@M4xGbKg4y6IJbeU3F@O7C4v^ z2P%KYy;mru%RP@f-nse}`#St?_$lE}g&rKxd_#9{ZLdNY--$HXULzw)>G%*xctH&0ekWzeB%25cH#epzAAW(zuZ7{s+_ayI`g_$d>Xp z-vS#b9vmQmD7p@BaV*`bs#uv5JR8eNo~2?-E*Q(rlOF+T07fV7qktdG59t zpLdowV8Oy~ZV1Exb!y$wwXI_3Pf0IsJZje^oH5sQc%Z!D$Q#jk0~p$SuG8MQ4-Duw zjp3`9t^6@zGMnpp3|3zdJf(6Ym*+B;!Vp-GC-}FY`Q=?lhpeZu(jkComzF_U@dx}R z$$z&l$NheOZ0A4h=WlA_j>`W4r}>>t`_A9j;Cu_A>NfUv*G!=#3-Y@&hQY~iFOH`K zaez8{QFr2$hzpmxNmU3AVxMaTBxOe^wPXJeM~2C(QFiW!NI8rt~M@f;;;P;PqWmuWsju zDz|W<0x>Xwe4pmkz zP01bpk?|f2JEYUD?bVhB#4SQZVCN6f9zgSYFBD%9eta$3sTo2&BkUgi7G6kA@0HkX!ar*IIC4_+H~NFcZK z;F{#F?LOVA*~K&MGB`5)%n8Bh8yV@_k#+DY;0uq6e-Sjh2u!ltNw(_n#uQ=|C<37w+Rp5;gV@Lk;2Oppl%+05qUu^w zsbu$OqI?_hKA-T{N!7k3>-TMI6TzEiE0=~QJ9na-Zk}ShB2)K5yr*agu_ z!rmXz+^^a#hS+A6cB9-c`v5PM2<;jSV7pdrhlI_y?Bji=+gLp4d}D6RSvNDNwFI#T zjAL>1#(nFpQBDeVC22c+k-D3YyG>pz@jl@2{fu7={vjuc;<2-qNuid|Y7#-2AT5B> z0!HO=(B}jOAomC99V$HsU+|S|B z+xTzBOL2XnJTO|radH`@wj|1-8@r zqX$i{Hvm|0RE|RogMvp!1D3Q~e6N4O`gxsDw=M0X{5?%;9TE?NTC4aQS|;P+=DRB< znQ=RB{?DC=#k(=+q~wBo;Pu`|nD~O%Q}`+3O-De#f%Q)d>H)ml7C$7>Hyh(W!yw0A 
z<#2iiuc$ShKSuba;WyE>Rr^JjrwLdt7XYyyg#Fo2^Z0ie&yYTYsCf6oI=92m5U`iW zo)lj{Pr7cposqyv0X@h88=f2?Cpi_ACuhw5f9LD}01iuqz0bS+kDq=I_@Dk2zsBn( zTeO!zydzDLNY`doW(6IG@R0slIps#-PWX@T5`7ED`uujcQE2)^^ZlaP?cfs1=;JuY z%Y|IBu1Oy*dtmu*#z-!_Fpz7|E47>xBwCfavY`^9le9Piw%`(=r~|HWaC7I{C5^VF9qJ|AUt^9)&SVWD zs>#R7JD-%}ra9-D`sZ2FEw%d%M@qaiR1;D_65yZ2X|-=kW1Wr72J0PVdjM{aNkS z{nTR^{8qQ8f1g8+_yzkhF0rIP#b1k;_ZoMG?j;k&9NU`rS(VO4#pRCnJYW*p`A2cj z{?mW8vt0iG#SP(YKHkqwv&?XKYQ*B!& z7~W_;8r0&mipg3V&0^Jy+G!DBhPAV0;pHF|dx4F|D;ZdWJNY(`;2jT5(qy&Lk~uBk z3fFTk>5Fa2JJfP;8yj+?=EqWN-i0o9r(5p+zc0VB=1OV{lArbde!p?^?}{(AJ$q1> zP}VP={`u2sb|Y#i94djrsOS|y0B``K!k#YGJQb^bvrM?yN!u(gH%BstCu^_q43DTh zf(Qn^gT!AC^qnt8l*p?s)Nq`$fIo$L4E=joA93MrI`ZaFkQ;YtN`{XE8Jcm^r$rq- zKDFvfaB;P?IO7@K_tg3a!hScNt-VUfH0x+nIR#);xZo0gQb@=LHQ{2Y zMp3eNT|A%pGt;9vMe|zEub=!Ay8WkgOFtA|Xjk9biw)(Kw3gb0esEci6_(kUkbY7J z-XjIFpyV8QEVmZge9%EV2Xh`3r2(Co;{&kC$8Nak*1i7#<1U$J@n7LppN}-#LY^Se z5?J*cY+=M>254Ln#>P=6Kf)LR^P2f*LDz0x{{ZYlHV~UrFgF?6i0hEU1Z~FbjC$va z^WjC*jjy)r%$n^f?Y_40MRN?E&IRd^(yP z--xG=PqZOE&mj#v0G}%vm=+DxHH}wIH3w_< zI?J`N@wTt1c!KWAJA2*iGicKwRhK1zB(`t~#&;gMHKw|(153AMidhs$Sw_T`~qfm}2irk8UB@Y>w1(x?w5luoJ}a^R9q-O2|z?Say*3!AH*Mf~F}vPC z#beGJ<-lz8$G>diu)Im)Wg28xSC(ccm~Hc=h&~WE`lN4Fw*n!S{8Gn>yj9w&{N1Z<;lCkKESkEj*&@9e3m3$Kc{P?b+TH0#ebt4TiDq|2DD8|?C(u{!+h8GyhWP>)i19cz+}JXRMK>>Mv0HR-?TdS23%YD#yr z*YZ8<#kw7aoAColys?7D#qO>z1@6nQurrdia zU*C9l!w_lupNXt)TVIJGy75krZ4re1j$%Y;qilm8#&g(b)7Se-{{YsXGtXZOJbC4@ zPm8xE^8WxwwTd)X+o@@j-XxzW7}**Vj!rYsj+I-)cQz>`w)T3JhM|xp{{V@8iq_DA z$0QbY3^)U?Tn=i@rmA$`jIAtJ_f~fRMI>*hX)s;Rq-`Oi^AWb3jDP^a0~o0OBI@E> zgCbw+mv#vP805Us;nQINvHQ`+pcBq`?nQd^Wc{pHk;_ppX6$piuCJ@fqQ`3!T;GV% zZD58Ma_JYG(-`9eI8w~V1Gy&^^asH&i&}o8;C~Nzc1YwGeje4D@=qoFRJvtTDf>O7j6`Gaw*w#m4>i<$574z;I>W>ojm?Cs zE!#&NP|X`4%w&>V10(NyWCk_kaSbYUFwMoQrkb~Xzb4N|D6dx!RZ2Hc^xyMGH?H_w zRnc_jw!VwZy_V|s>&uQ%>?3820?wzL>|@S9QU`bNBU`Z0yjv>G=9neYucKK!mX(7E zp_$GQ5)>>>az{b$R&TZUn^5r`rl+MxCF5y~miHzG+Kd%hj6{Ri=Ufsp8280vd?u1@ zGey)ShEroc>qIkdXxnQ?KIO<|Jm6$!wRGWXPQDIxD`@<_U*=<4DxG-72dC+NFVNG$ zck-VQ+N4XU+N<0fJCho>m2d))}W>5dO0zh9+z&qPm%UKd624XG~Xob4$gA8BagzkD}734wviuHgf8INHxJjKKkIt(e_H7DODMd6X9Fd= zupckeze?NIJb$OOJ{@TMNu(fcIG1Uyrd+7Jx&f4#R{&!j`3LD-&8^fPYeeqxeLbQ(axn7Db!4Y zRSk11D?S)7+}z}G+o zh;?}!g;o4X0=?hi&+Lce%a0J;>DuOzb>Y!&ttGyzKA$bsznH2{F(1s?Bq-qWM;Nai z_*(-XiM|g2ft`BHEQr`3`SIX_I(A z<8CvPoac9JR@uI1h{DNJqiZz(01ue*+!Y$jo~`t^;(mE}!&33D!~XykU-*815>0zO zi4m;Uo)}NElA&7&T~)BE!yu?2@JAJ!Yw@?n+62R*Mr+~4rGek|BrUS3CUYkzkxkzT*6YBu)J zO(R(9P{h#Lh~7t1fto?Mlji{2(c%iZ`3?!lHRV*zCksMwPes{pZzHxC2u|{hn&0}| z_=m%OA@P^Rod;dhS6lwwzlJe&9+PLM!)((spPZ7)j@wZ|3ZQ^~@m>RRx=)0>XW`%U z_weqgG&b@y*MDiZbc`yHatnY58R@~n?Tmhj=r_<^>Ne9^L3?o&_t$}Zn}~PF1Vrst z`$h|l0`1&!fmmM@wQW(p9sEeNnjKOrXuLUe+hi8(ZNpp5^UOr9b`SPkNxjc_@fL~U_tLynf3ds>!Tc?tM(Um~xU~#|J0gnaNxydx0erbS zxCywD=Qq54quHPMO$>TYnJJURwgykL{6n|2XCe#G3$w8n>@&ni227GZ;jWv<;?~dM zUCsQn+uL1Brr1R_=ArQR)_0Ls-%Bbaw+h(+yO5lS%&en`6w^K~$Kky@N_nN1!d7j4 zu3J6anumppr1A+hSk%ZPh?ae^ZC5$l?6ptA~YtVdUW#N4@#=adfH-X@|)I4z&%)f82zgefb`(){u%t^Fd zVM2y{CgoyI2q(cjZL8>7jJh4VT=+`LD;qNduAla(Vo3$m^QV+#ARv@*dnP%-z$E7t&t3RxHnp-b_+wfO zVlKfRIZGcg*OnIjaI5Mu+r52t2`JN1k@NLaoUV>d=LJHM7$dpelli?yBreP|7&yT^ zfAL1TpZG^x%UM6MHS1{dK}WR=dtm3wUOM*e*0c3X7%W*M^KX^qjJhPY;X?JuIBvXp zS6pO{cA4&92{RY$m+_>62|r{QOdt7ZzvEv$!349c0Ty;-w%k@dIy z*R!MO{{S{x*W#WnD48L;jY;b^g$EAI+(_-v z_G4LEC8Fv&#f_|)**(N@Hq*`-fH?f?s&2^~vR9Gmo*7LYG>t6kG5CElh)ZhPZlJmM zFK&K+sOw&r@On=N$MGvlmfCNJ@yi^uIFi=GT)1=s88UpS@{){aD8mF~4oI#ez}lS9 zY1&z{zPPhEQ@2m?#5!Xq{04A#k@$gL;qcG?5{plW`V<#;-Z+kXktFb2{{X^&XMJ%h z7^K+nTlq?5$r(ZpG6Bv3uR6TF#=M_YdKA{E_H7Sd@YnnzR}wa(@XAYd)L>Hjhleg< 
zw^=;7wug);CT<&Rjsl5u%L?7_?xCX{A`@#j!?OWr+H{M1m~FoMXKFCWInICD-A~YU z&k);qYC@Wi#LHWhx{Wta;d^__poLp1s4^N7Lhg{@saVJH8kP?cYJws5onOUs&y%`K z&j&{Yr=Q|Q7#^H;7(MI6gq4-QPx3u$K4<>`1pMY*M#n?auHG*oTu9(5B#7-HPysAX z><*{@0Iq8rT-NRu<{KDpe9Nv-v_y^aWMd@t9PzhrYU*V0vyCd|7oONm=pI{NwET&a zmO!X6pShA4oOI}GCsOd9zpLIB(!49FU&wZgd9CfPCSNBU5aY4Wt_D7|=uVTB?|)Ov zag#jm=TN$i>24BCOBwshyGS|DJPh~iTN)IH#8;*X@1?bp;nf17d<+`l8{5#7k&DxtyMQG6O37{;bGMlBmN`&!lp`bpOJlWqW{LYQ_!i32 zB-i2bCaZLXa$acUP`}+6ho*9Pan~N_kNByfCxx|JeKW(B(nF+89?KO`xWdXeK2?6} z79{ch@yQ0ha;7?#E~2MS@_qJC)2Zk@ElL!*r)wMzp772uJh%*VF~rW;ETivWmOOK~ zU=DWV5fuR2rx;%YrEo~(*K{9i8ne7gStk>%Ppi4TYL*rOPG+lN=Uka9`u zf$RqWSEOlL%$9!#wJ6@#WS{VmDOG1xcSC0Byk|KBCxP5!xULgZyNVzXP7Dk%WRE*g zW1n{8oNz$xRW-j3-rDImS1_&9PX`7V;W9fue6>zH?qzO9H}U?TCxt#DT5F8~gwG$??&Bl|OPC~9OoQKQ zGUL)SU6^TJE}Z&VuhaaFYO!&Wn(L!JfC)09%_h^+argfKKhC^w#U38G)_yx^dK6Qt zTI;uVVT6%_7C9tXp9JSTi3#J3kEry`Cs#J8u`v?bu-ch9*h#_qj=$vBqkI|ocW0~o zMA5uas(E6<(&PIs)@eFD#Jg7`oDiuasq{0U9Wpzy%2cVoWq#gfG-9hwTT4BBk4yN6 z;HUWM;;TOdUEDo~fV8EHT=A9ZCN+4MFKMUl9Sie3gQ|u@+*geL)c*h$^$&=CF<*v$ z3SpsJ*h02Ch3&d+O$N$95L!kC0J@cMBpC%05duGbpK$%GzCGz*3ceX@cAf;5(^A(w z-Q(<1B*AYaBR4k(ByES}U>NfLQsZioUkvyY#LeNK0qN65t*Po$w3ipRrc~zT_Hzc1{60r{x5p- z9cs)brp*PXkr=vm#=to9E(q!LCmpbBt-kU0h;00Wd=_t>CJ7vaor)i-b}JeDPjOuK zt>W7Y+xcR*k~di-F-etRQH+K+9D&o%>;Bb79yO^?-h{q){{SOu%9Tj_vR{Y!oZg-u z6KJxbLJUNPUU;)382W=+z7_F(?}0TaHH#)K_Iq_{Uy#XpeA@r$Z)T6#W zWYLf%9%cX^#1)tU*FU_82V7v(cX~7$oL3Rrm4-}r8Q;e0Q zjzvas)oOi>@Z-eVRC?NYg34&6&^6RHn!T)q6@ugw8cmLaljS$n4ggYX=1-1459Ihw z@g=O1EO>WN6BKQvott+Uo(KGkyZ*+%x0-3Zi)MKyDYW5HhExDZ0|k_4 zwnsfO{VL`wEk^FsPF|EyYR>CvvH_t2L2souA2wS zi&N7g!-2rTS)^b%$Rpe3;}y^NgTOkzhxT;8wzIMeGMKH+%wdQp7~2~Lj2w_oRF83z zJgL{i)T=6RmDR_SZ~!i6{0mc`5!LZXY?r zKjS`GF6sVs$2gEkE&2biic|GijYVCHBCRf{%2IY+5g5+&2f-(+IJpEsBbtpPb-JUh!PXPGK#tRZz>2`~!>Qf|7t3-Zi*NzJ*I85Vq6r2J%BNgu&uZn-+ z2#ZTw(`Je1HI!aRAz5Pu71_a3RrZoN$4rg|c?OH4S@_4qnthUL5qS@9F3HriYT`GL zrp04~3%I0b8q_T=2 zUj!2#o~pA%Rtv{Vq}qCTTMmXNABEkRwT=C zFn4t;_z4p`;~H9QWjb+Xk_`i`l$&dj_Fp zqbUCXOjV8>6*Nka^2?E z>OJM2HVJ?U+N+PxC*N&NuS11jmE7~q63be<__20$#_=qGM2{-yvNSB?YUH!|(S>e+ z#FxROM%~B}hkSxb##@a3eT}|}ZK!--(Gu@QxYT0u zpve1o+0+j?h93~QIOjiiIqEB!_}`>iTc9?69I^1q$pCe;I;6MqMmOO3a!H29bDi1i z$5CF5IbLerzpv`YJmc=OoA`a={{R>3x~;CCr}%|*{qjc$GQk-Nr#W&;^iaH#K;&eB zlV3{sM&{!~@ooN*tN8x=##U|ni)-x*{TZWU_Xtrk{Kc@l9B@W4-o7oij_Su$j_Te< zwrK7#9Fa+eM$c>BW*Y>&&|p2>q@2_)OxFZ!>3up zMtg>%@u$VM*Yeufc!@Pdnt0bw^X_DoXHzLdyotz1QdI33;OD0&U&G%UyhU?kaXzN8 ztf&At81e= zIi%W@W_5ZW?H_NdU%j!^ZR}%Ef6^}Wt3*=!uoMJ{o_RU;^{qQ^+6Egv8q-kLF6?Y( zxQI%&sc+{#2pfOZs^dQR#t%G-%kXc&yKfUh_YnAQ258aUlFMGPx(&4n3K=d=7`D-Z z2Xmj76}98f+4fyF^2poxJ@4%0wYGUK?e!&#snU05{nz~gL}0i2n3 zsoIE{!OkR70U0B`);0YEytFl2PEcG#|P?qX1um7M@I3@ zT?wO@)T0P26KGO~LyFoN^0#2!P;<34{C>1`28P_?QKQmbKcrKLTH*h*wQ1luowcPkYYW*gVQzF z&!zs(cQ>tYPlyuk;aWLoivIu-vJ<$6U`hMHaqKv!ufzWU+1WJ61Q-4zir&l3iYqTR zNnI7uoy=$=Y_1TTf%mreJkPSDZD~9I0A6L@s(MW1bdTC1$H7)&2qd1`D_P0?_Ni{F zvPi9vcPId`1oX+<-xc+Dg>-#8;t#{EJ5j&6pX~l2(xPju+`Gei5&X-CAROZa z;=WzfJQw>d-w9*B)x1ZR!=JV3cdUL@z~}7Q;Yq;HbL(G2_P<9r=in#6dEkmA z)wJh;Fkq>#M9LTw=*OmiUwU+ZvxmW}X(oalLKs1pd#h@2xmMf>Jc3t{2kBWC9u)Ws z589G@{U$jq9enXMh1_N$ z^4!LOSc$+Trg+B(s{>Y)rMELeafElW`5JQm%s&k7E#!vR#W#^c;{rG}7y(i8;0Fn~ zeZ}W?e=}O&@Xfv%x`pFk6l!6PS!PScR|J+nF3b<&BN)a4f(TW{4wc}agnl0Jbar;S z-j}EAuWc-m!#%#BBru3v$iscsFd_~~{_blg9|imq)~)q1Z{qz3LvLzz3*=Y6i53D= z_pUzL2Et5aWMh@ZJ2sPC-TwfsLGs?suj}M`{Qm$4q}EkI@t&}a9KLK!lKsPyM&5V> zJPz0%mB9Fu_7m`@h#^y`-fDL(7$!Mc8GNGH2OC&=56ZFpIq=`$<@TSfjVH!l9h*;O zg|!Leyqqk2vceuly(I%~Z~!12W1asL2A#5d@& zLu%4_9$aXIF*AZPvmAB%y}H+(+cUczaLN 
zb&nNzHs$S|?c;q+#~T)nfeJSh^B^h+?ed;8foA^z;ckh2b9Je~tN1Hg)Q_2V>1}!* zc^C(0WeN^R4I%tZdX%d3MM>95_G(|Bvu6~r?K^uf#eecg&OQZVJo{Pr zJ@CU;wVE=QsL!~li3}jN=3qB1sVXwq;2Z!3Fa>blDE*uCxjbQGt$1@*w$iLshj8Q^gap1O^~EJ4<1c(LXaz|M2?oPq}c@qu1buZp1k^#1#vgy~^wMLK%l z%EWqC!JEAoPt)VL@ph7;1yus;!%*KT$M;%jl;i7~?tDMtxHSE5MAR>}9UE1%v$mQE ztz+=qRz`6rpMbMMyP;p+s5?2~kMSC@@K58+{{RVYbtx?LeJc9m^navYUHP9f#u=4z z8|ENq2WaD|;2m@#gNy!GO z<0UO7?4F<2hANa2_g1H>=-wpo=AEKRVfHO!PR#2z-7m!V@tll+M26h5ug%PODtIJ~ zpj7_=@R(@H#CP5%C_m@6zjrhH<;}1ryb>QC>l*HZ6}#zP9I>!d_Jx|mOl6v4prDz? z;)-zPa;F$L#w#*iM&36oaSgYd{{UP{@%%h&J+a)6O6Hw)PHq~sy4{+qG^KCA^hNk# zt=MUA5!1BWX&ynba$;UOf2gZ#$>RZ>FMAPIS zyjbJ=9j*%Jg2#AnI^%aW@-eSc7Q7n%y$^5d)UEDGGv?hA_ryLI)nS@-)^6>gS5~^c zd7I1}?6a&W*Mtf(!anBW{YpM7`NiC#wx|v7}=#fl-E;4$R=m8vh zdm6(N7d2?bU%S8g8)4(g@k>vU#HGlMw!F-9$ql%EMzcI$q}X_aMwZ!iUn+7l88)kN zwfkjU5&3iIMSAVs$AP7{R9lTk#zT$%ZRBbq0rnGu*~am=oD7_2G}{jdcxT1QDrp+K z+&p7@D@2mx+%R3da0JwG-`A8$a{I#pg^g;VdSa^m+@dm4-Yq~Dx!diWvDV2M2U{HGx*1B&Q>mLxtn;G!; zhODK$LHowk-Ui-%S8S`$7S49`7!{oA-N%|ycK-MIqT>l9?4_@l{1f5LW=s2fCz^i@ zYL?JC>`Q^DLga<{hSJa{JF~_=g!)!wSJrUbES^@Q2>aU;%5_ne3IX1rRn&Q{{XaRo#Z?}4ty=CWHFISs+Bw291Fre=mg-NxT;nk6uvl4_Y1209?&B4;%O0~ zP}|%U8#tWfxzDu=;nR`-0Ncy_%jaqPyvg~e{zuQc55&DM!bv^A)Ab9BmyCqD@**J= zlhJ(cdE?*i4i2OUQXxAD|ve}CZxaShQ&2>KmW3C(I0prYgR2&h>$3DZa z_@Bq05Hvp*_;*+MRSuJE)-pYn{j5&UaVpOcR8=|Umvdl$cD`b@juj<@NV2KhouCow zGh0%PHFr^IdSRoUYckW)(24M$vzHv48PTU+|5# zfd~8|*Sf}=J&bQWvB4tC9GPWc8w7u}K^~ln`ftY{1-wJ8d^7OAt)%KYg^NRH4d?b{ z+c7rtvIlUgpt3gRAZ^`}pb^L;x$w`xpA1{;77o!p#1>4l23?%UM!mFO|j zz1mAVT~AJm8;GO2wUi-fxfzwqVF z%)0#EPl#>xXPB7o;%jT!mB9x)*`Nwe2?R001&4Z}Cxnwm@gV-l)@**ka^M?DWMQ$o zvV6;jA0Z%|WDih9eUI^9S@;X!pBdd~9xIvbHF=1=mKd#WN-=+%e&~>{ zxj!i2XQ1b{ev^F%_E)-i^yxLrc%`(EqT1Xw;uzRA=TfCd@oyv_PI^@xPvNiZ&vmMp zpTs&{gzQ3y*78_LJPr_$Gmzc7gH;?wSTBZO_zNf4!cXFl-jAGa{4uDNV%Pe5tgS4D zIQ&Cu(SqbI-M(iaZN_uKC(^6Q;AviR_Zp>wM?1WL{{U=Soy#HsTn{syl11C8&!v4N zJ{11X*Os$dL*sohkN|EJYEbW0Tnw=%a)sn|9XYJ)&w$^v?aj&0#Emu?$=J~@wP?F| z3yr(DJY(>!UOBaCbARA0g@tL}Jiqu7{{S<^tn|x!LRh7Qj(0iRe*-8xn2<3iOuH34 zZ~!5#k&mzLjJv@Gj@ zGWl{i$501auQ%-B@QTG!3qOl?F_RwFVX4>_83Zz%zq$7vv6EhPUu9Q!FS(wjDz&L- zlz#Mc`gX0SY1Z$!!z_%uNK&VFbAg-z&pcLr)%1BP?ZE)u#QV6HCnugX0~Jh&36>pHqt7MUVX- zQ@FDz?7;EmFa!~}=NKdpYKv0+p#Ixn);3yA*k3PynjR7*378b<()tG9OC0s#jbz466#8WQQ=BZ5l{dwW}3+ozG0Ackq3u^!++ z!*I;1fOGQ^#w+M?m>N!g-;uWWcl>O8wL0_V{o>wKlazH-mM1XmtxoS2}IeE3z}%w1aoRvE#;;`m@UkXbJPYWj@jLeyS8)pE!=}16ez|xIm!3Wdg!T! 
zBW&^5`}+gIMA85vq~`=Kzd%;ELWRF2;|U8sSQe zr`4W$$fxgW3NOq)`8Knypl z{HG_LFmg|N=uX*$f)3PAC^G@X#PQDq8NuiI`%@Zytaqtlj&Zn-qaff8I&s#k zE&J?=5mq*oZw?e4yc3Rk{n1Hhs7-M!iiAhy0C>(Z?b8R7%_lB^k)VzpHpwJ!C@r;@ z<~ZxePkiG)ojMU0%FJM6o~AYkeSssMy*T>OERxz<$2v;gu&0nSob{*uxW{&><;Lbn z%Wino-~rQsKR;i}x#~qd&bn=-pe#v;nnr-AJ6x$QH+=xW{ZBPCQV5xhcUGn?qvdEe zdUKM%bR(yyLr$}6i-&jfwgA5>s2~nT0l@_RJkjPz*K*pI`@?x`ag1jhb6k}v#jQ@5 zRB7m57iu66=0Y9L3ky-xu0iC0J^eU5R&3W6*8!Ly4Uw4?3}dn9{{Yq2pBz%Zno>y6 z6dx&tPX|0_ufJZkQd@~_1_pS)x;-z$UO1o;n83^918)E@B zrqiFV9@H|&6FL6?SlGa3Tn}^CpGtSw!(A*gF_R-|oL~>GIXv^!RKm%lwlbGfw2p{D z!5uS`{HxT9Nm%op-lb`TVn7POWIYF!h9GA=40`nh(Ge0ZRaQ20f~9t2@)qNy8}3=15BxX&5RM}K;nD|VFvB2afaq@0(P6LoL^&I|W{{SkC#;Y1K zqT${6IXL9!zkYu@i7rf=l#FhVe;u%KP}3<{5=cPF5G;&HV3Cq@>UjhFKRQn|jhUBe ze6oijbHVM5^{Y@=3HQ4&Aps18aM=8KtjKQ?OM;1Rir+R1bI^NZ)}?AP*?5^lGs2n6 z6Oc#OAY(KtI9=&-lY!2Dq~v29{xuEO>sXARBLKq-wD3-O>U{?k-#y#!W0CwnDeLMv z_v_ZFCdA~6es0$T=8KH(+5jDUW0B8FiLQ%)aI6M+3{dq4JwN*1wHS@iU`TKV?2(ds zoE-3adUvSp?pof`(rKdaGx`i#+vt<4lIjE;lqys46?Kl|+ z^yZV#C1X82)?8o#w(xX}SWy11!>T&*mt!#{*qqn<-F__34a}fmgllt&^Ji%T<{AG0Jxz2+xsw@!*-%%H;R}O~$EW$n zTotfNX)t&(}WvJ!@2EF(N43v~22f z6vRgz>Rq82;$#CHf}{X?bj4L|L7|vN7j{Wexa1GR@vS&*y8QVVb9+ zxhW=Bl}5&CBg&N|oDv7--RWYqR6BrWA@~7C8;*k%>8=8c z$f)~|IbWdv0PE1%NnDCpn3M+tfB;ltNc<1^_NdtiaIZ6MBaO+y824dUF9Eex+U^4> zKPwT&0meF0<+Uo#OoW~?KmhUy>-bW7Y6%&#!*4e&A|7*=L!YlX$*3fT9feinRy^hy z!TyyqT}UMR*&OHPUf+c>Y2+)sunNQisxW$-ejIvKy8&OxUAtY6_knO19X+Zs6fD>X zpk$Cp&MLw)$svz$&KZLdoB@t1In04m_s(}V(VQH1^rR*+Vr+57dTd|oLdS!EPBYK| zaZ)Z;^%y^eBA1yEw;wPsEKE7efrH!Y{{ZTzuAb4B zjgHaKlBcFVqNYA#xkiHqMZd z*hV*#^)1a@j(H(DOWF9&08&1@kLj9iz1+!P)8=^ z$po?B62#+?>Urz>R=3bRiJ`lJf~iOW&Odi(iakgF0IrjO8Qa+ gW+E0MG?ykJo_lrn>-tmMOkmv3e*j1Irh`-e*$zSK0{{R3 literal 0 HcmV?d00001 diff --git a/cnn_class2/extra_reading.txt b/cnn_class2/extra_reading.txt new file mode 100644 index 00000000..d68f40bb --- /dev/null +++ b/cnn_class2/extra_reading.txt @@ -0,0 +1,14 @@ +A Neural Algorithm of Artistic Style +https://arxiv.org/abs/1508.06576 + +SSD: Single Shot MultiBox Detector +https://arxiv.org/abs/1512.02325 + +Very Deep Convolutional Networks for Large-Scale Image Recognition (VGG) +https://arxiv.org/abs/1409.1556 + +Deep Residual Learning for Image Recognition +https://arxiv.org/abs/1512.03385 + +Going Deeper with Convolutions (Inception) +https://arxiv.org/abs/1409.4842 \ No newline at end of file diff --git a/cnn_class2/fashion.py b/cnn_class2/fashion.py new file mode 100644 index 00000000..997322db --- /dev/null +++ b/cnn_class2/fashion.py @@ -0,0 +1,109 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Sequential +from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization + +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + + +# helper +def y2indicator(Y): + N = len(Y) + K = len(set(Y)) + I = np.zeros((N, K)) + I[np.arange(N), Y] = 1 + return I + + +# get the data +# https://www.kaggle.com/zalando-research/fashionmnist +data = pd.read_csv('../large_files/fashionmnist/fashion-mnist_train.csv') +data = data.as_matrix() +np.random.shuffle(data) + +X = data[:, 1:].reshape(-1, 28, 28, 1) / 255.0 +Y = data[:, 0].astype(np.int32) + +# get shapes +# N = len(Y) +K = len(set(Y)) + +# by default Keras wants one-hot encoded labels +# there's another cost function we 
can use +# where we can just pass in the integer labels directly +# just like Tensorflow / Theano +Y = y2indicator(Y) + + +# the model will be a sequence of layers +model = Sequential() + + +# make the CNN +# model.add(Input(shape=(28, 28, 1))) +model.add(Conv2D(input_shape=(28, 28, 1), filters=32, kernel_size=(3, 3))) +model.add(BatchNormalization()) +model.add(Activation('relu')) +model.add(MaxPooling2D()) + +model.add(Conv2D(filters=64, kernel_size=(3, 3))) +model.add(BatchNormalization()) +model.add(Activation('relu')) +model.add(MaxPooling2D()) + +model.add(Conv2D(filters=128, kernel_size=(3, 3))) +model.add(BatchNormalization()) +model.add(Activation('relu')) +model.add(MaxPooling2D()) + +model.add(Flatten()) +model.add(Dense(units=300)) +model.add(Activation('relu')) +model.add(Dropout(0.2)) +model.add(Dense(units=K)) +model.add(Activation('softmax')) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(X, Y, validation_split=0.33, epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + diff --git a/cnn_class2/fashion2.py b/cnn_class2/fashion2.py new file mode 100644 index 00000000..a5c7452d --- /dev/null +++ b/cnn_class2/fashion2.py @@ -0,0 +1,104 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Sequential, Model +from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Input + +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + + +# helper +def y2indicator(Y): + N = len(Y) + K = len(set(Y)) + I = np.zeros((N, K)) + I[np.arange(N), Y] = 1 + return I + + +# get the data +# https://www.kaggle.com/zalando-research/fashionmnist +data = pd.read_csv('../large_files/fashionmnist/fashion-mnist_train.csv') +data = data.as_matrix() +np.random.shuffle(data) + +X = data[:, 1:].reshape(-1, 28, 28, 1) / 255.0 +Y = data[:, 0].astype(np.int32) + +# get shapes +# N = len(Y) +K = len(set(Y)) + +# by default Keras wants one-hot encoded labels +# there's another cost function we can use +# where we can just pass in the integer labels directly +# just like Tensorflow / Theano +Y = y2indicator(Y) + + + + +# make the CNN +i = Input(shape=(28, 28, 1)) +x = Conv2D(filters=32, kernel_size=(3, 3))(i) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=64, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dense(units=100)(x) +x = Activation('relu')(x) +x = Dropout(0.3)(x) +x = Dense(units=K)(x) +x = Activation('softmax')(x) + +model = Model(inputs=i, 
outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(X, Y, validation_split=0.33, epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + diff --git a/cnn_class2/make_limited_datasets.py b/cnn_class2/make_limited_datasets.py new file mode 100644 index 00000000..2b27c83a --- /dev/null +++ b/cnn_class2/make_limited_datasets.py @@ -0,0 +1,39 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +import os + +def mkdir(p): + if not os.path.exists(p): + os.mkdir(p) + +def link(src, dst): + if not os.path.exists(dst): + os.symlink(src, dst, target_is_directory=True) + +mkdir('../large_files/fruits-360-small') + + +classes = [ + 'Apple Golden 1', + 'Avocado', + 'Lemon', + 'Mango', + 'Kiwi', + 'Banana', + 'Strawberry', + 'Raspberry' +] + +train_path_from = os.path.abspath('../large_files/fruits-360/Training') +valid_path_from = os.path.abspath('../large_files/fruits-360/Validation') + +train_path_to = os.path.abspath('../large_files/fruits-360-small/Training') +valid_path_to = os.path.abspath('../large_files/fruits-360-small/Validation') + +mkdir(train_path_to) +mkdir(valid_path_to) + + +for c in classes: + link(train_path_from + '/' + c, train_path_to + '/' + c) + link(valid_path_from + '/' + c, valid_path_to + '/' + c) \ No newline at end of file diff --git a/cnn_class2/ssd.py b/cnn_class2/ssd.py new file mode 100644 index 00000000..12808b84 --- /dev/null +++ b/cnn_class2/ssd.py @@ -0,0 +1,133 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +# simple script to adapt object detection notebook from +# https://github.com/tensorflow/models +# to work on videos +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import os, sys +from datetime import datetime + +import numpy as np +import tensorflow as tf +from matplotlib import pyplot as plt +from PIL import Image +import imageio + +if tf.__version__ < '1.4.0': + raise ImportError( + 'Please upgrade your tensorflow installation to v1.4.* or later!' 
+ ) + + +# change this to wherever you cloned the tensorflow models repo +# which I assume you've already downloaded from: +# https://github.com/tensorflow/models +RESEARCH_PATH = '../../tf-models/research' +MODELS_PATH = '../../tf-models/research/object_detection' +sys.path.append(RESEARCH_PATH) +sys.path.append(MODELS_PATH) + +# import local modules +import object_detection +from utils import label_map_util +from utils import visualization_utils as vis_util + + +# I've assumed you already ran the notebook and downloaded the model +MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17' +PATH_TO_CKPT = '%s/%s/frozen_inference_graph.pb' % (MODELS_PATH, MODEL_NAME) +PATH_TO_LABELS = '%s/data/mscoco_label_map.pbtxt' % MODELS_PATH +NUM_CLASSES = 90 + + +# load the model into memory +detection_graph = tf.Graph() +with detection_graph.as_default(): + od_graph_def = tf.GraphDef() + with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: + serialized_graph = fid.read() + od_graph_def.ParseFromString(serialized_graph) + tf.import_graph_def(od_graph_def, name='') + + +# load label map +label_map = label_map_util.load_labelmap(PATH_TO_LABELS) +categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) +category_index = label_map_util.create_category_index(categories) +print("categories:") +print(categories) + + +# convert image -> numpy array +def load_image_into_numpy_array(image): + (im_width, im_height) = image.size + return np.array(image.getdata()).reshape( + (im_height, im_width, 3)).astype(np.uint8) + + +# do some object detection +with detection_graph.as_default(): + with tf.Session(graph=detection_graph) as sess: + # Definite input and output Tensors for detection_graph + image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') + # Each box represents a part of the image where a particular object was detected. + detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') + # Each score represent how level of confidence for each of the objects. + # Score is shown on the result image, together with the class label. + detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') + detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') + num_detections = detection_graph.get_tensor_by_name('num_detections:0') + + # instead of looping through test images, we'll now loop + # through our video! + + # get the videos from: + # https://lazyprogrammer.me/cnn_class2_videos.zip + # and put them into the same folder as this file + + # open the video + # input_video = 'catdog' + # input_video = 'safari' + input_video = 'traffic' + video_reader = imageio.get_reader('%s.mp4' % input_video) + video_writer = imageio.get_writer('%s_annotated.mp4' % input_video, fps=10) + + # loop through and process each frame + t0 = datetime.now() + n_frames = 0 + for frame in video_reader: + # rename for convenience + image_np = frame + n_frames += 1 + + # Expand dimensions since the model expects images to have shape: [1, None, None, 3] + image_np_expanded = np.expand_dims(image_np, axis=0) + + # Actual detection. + (boxes, scores, classes, num) = sess.run( + [detection_boxes, detection_scores, detection_classes, num_detections], + feed_dict={image_tensor: image_np_expanded}) + + # Visualization of the results of a detection. 
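+      # Shapes coming back from sess.run above, for one frame (TF object detection API):
+      #   boxes:   (1, N, 4) float32, normalized [ymin, xmin, ymax, xmax]
+      #   scores:  (1, N)    float32, confidence for each detection
+      #   classes: (1, N)    float32, COCO label ids (cast to int32 below)
+      #   num:     (1,)      number of detections
+      # The call below draws the boxes and class labels onto image_np in place.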
+ vis_util.visualize_boxes_and_labels_on_image_array( + image_np, + np.squeeze(boxes), + np.squeeze(classes).astype(np.int32), + np.squeeze(scores), + category_index, + use_normalized_coordinates=True, + line_thickness=8) + + # instead of plotting image, we write the frame to video + video_writer.append_data(image_np) + + fps = n_frames / (datetime.now() - t0).total_seconds() + print("Frames processed: %s, Speed: %s fps" % (n_frames, fps)) + + # clean up + video_writer.close() diff --git a/cnn_class2/style_transfer1.py b/cnn_class2/style_transfer1.py new file mode 100644 index 00000000..5f5a0057 --- /dev/null +++ b/cnn_class2/style_transfer1.py @@ -0,0 +1,167 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +# In this script, we will focus on generating the content +# E.g. given an image, can we recreate the same image + +from keras.layers import Input, Lambda, Dense, Flatten +from keras.layers import AveragePooling2D, MaxPooling2D +from keras.layers.convolutional import Conv2D +from keras.models import Model, Sequential +from keras.applications.vgg16 import VGG16 +from keras.applications.vgg16 import preprocess_input +from keras.preprocessing import image + +import keras.backend as K +import numpy as np +import matplotlib.pyplot as plt + +from scipy.optimize import fmin_l_bfgs_b + + + +def VGG16_AvgPool(shape): + # we want to account for features across the entire image + # so get rid of the maxpool which throws away information + vgg = VGG16(input_shape=shape, weights='imagenet', include_top=False) + + new_model = Sequential() + for layer in vgg.layers: + if layer.__class__ == MaxPooling2D: + # replace it with average pooling + new_model.add(AveragePooling2D()) + else: + new_model.add(layer) + + return new_model + +def VGG16_AvgPool_CutOff(shape, num_convs): + # there are 13 convolutions in total + # we can pick any of them as the "output" + # of our content model + + if num_convs < 1 or num_convs > 13: + print("num_convs must be in the range [1, 13]") + return None + + model = VGG16_AvgPool(shape) + new_model = Sequential() + n = 0 + for layer in model.layers: + if layer.__class__ == Conv2D: + n += 1 + new_model.add(layer) + if n >= num_convs: + break + + return new_model + + +def unpreprocess(img): + img[..., 0] += 103.939 + img[..., 1] += 116.779 + img[..., 2] += 126.68 + img = img[..., ::-1] + return img + + +def scale_img(x): + x = x - x.min() + x = x / x.max() + return x + + +if __name__ == '__main__': + + # open an image + # feel free to try your own + # path = '../large_files/caltech101/101_ObjectCategories/elephant/image_0002.jpg' + path = 'content/elephant.jpg' + img = image.load_img(path) + + # convert image to array and preprocess for vgg + x = image.img_to_array(img) + x = np.expand_dims(x, axis=0) + x = preprocess_input(x) + + # we'll use this throughout the rest of the script + batch_shape = x.shape + shape = x.shape[1:] + + # see the image + # plt.imshow(img) + # plt.show() + + + # make a content model + # try different cutoffs to see the images that result + content_model = VGG16_AvgPool_CutOff(shape, 11) + + # make the target + target = K.variable(content_model.predict(x)) + + + # try to match the image + + # define our loss in keras + loss = K.mean(K.square(target - content_model.output)) + + # gradients which are needed by the 
optimizer + grads = K.gradients(loss, content_model.input) + + # just like theano.function + get_loss_and_grads = K.function( + inputs=[content_model.input], + outputs=[loss] + grads + ) + + + def get_loss_and_grads_wrapper(x_vec): + # scipy's minimizer allows us to pass back + # function value f(x) and its gradient f'(x) + # simultaneously, rather than using the fprime arg + # + # we cannot use get_loss_and_grads() directly + # input to minimizer func must be a 1-D array + # input to get_loss_and_grads must be [batch_of_images] + # + # gradient must also be a 1-D array + # and both loss and gradient must be np.float64 + # will get an error otherwise + + l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)]) + return l.astype(np.float64), g.flatten().astype(np.float64) + + + + from datetime import datetime + t0 = datetime.now() + losses = [] + x = np.random.randn(np.prod(batch_shape)) + for i in range(10): + x, l, _ = fmin_l_bfgs_b( + func=get_loss_and_grads_wrapper, + x0=x, + # bounds=[[-127, 127]]*len(x.flatten()), + maxfun=20 + ) + x = np.clip(x, -127, 127) + # print("min:", x.min(), "max:", x.max()) + print("iter=%s, loss=%s" % (i, l)) + losses.append(l) + + print("duration:", datetime.now() - t0) + plt.plot(losses) + plt.show() + + newimg = x.reshape(*batch_shape) + final_img = unpreprocess(newimg) + + + plt.imshow(scale_img(final_img[0])) + plt.show() diff --git a/cnn_class2/style_transfer2.py b/cnn_class2/style_transfer2.py new file mode 100644 index 00000000..704bdbcd --- /dev/null +++ b/cnn_class2/style_transfer2.py @@ -0,0 +1,144 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +# In this script, we will focus on generating an image +# with the same style as the input image. +# But NOT the same content. +# It should capture only the essence of the style. + +from keras.models import Model, Sequential +from keras.applications.vgg16 import preprocess_input +from keras.preprocessing import image +from keras.applications.vgg16 import VGG16 + +from style_transfer1 import VGG16_AvgPool, unpreprocess, scale_img +# from skimage.transform import resize +from scipy.optimize import fmin_l_bfgs_b +from datetime import datetime + +import numpy as np +import matplotlib.pyplot as plt +import keras.backend as K + + + +def gram_matrix(img): + # input is (H, W, C) (C = # feature maps) + # we first need to convert it to (C, H*W) + X = K.batch_flatten(K.permute_dimensions(img, (2, 0, 1))) + + # now, calculate the gram matrix + # gram = XX^T / N + # the constant is not important since we'll be weighting these + G = K.dot(X, K.transpose(X)) / img.get_shape().num_elements() + return G + + +def style_loss(y, t): + return K.mean(K.square(gram_matrix(y) - gram_matrix(t))) + + +# let's generalize this and put it into a function +def minimize(fn, epochs, batch_shape): + t0 = datetime.now() + losses = [] + x = np.random.randn(np.prod(batch_shape)) + for i in range(epochs): + x, l, _ = fmin_l_bfgs_b( + func=fn, + x0=x, + maxfun=20 + ) + x = np.clip(x, -127, 127) + print("iter=%s, loss=%s" % (i, l)) + losses.append(l) + + print("duration:", datetime.now() - t0) + plt.plot(losses) + plt.show() + + newimg = x.reshape(*batch_shape) + final_img = unpreprocess(newimg) + return final_img[0] + + +if __name__ == '__main__': + # try these, or pick your own! 
+ path = 'styles/starrynight.jpg' + # path = 'styles/flowercarrier.jpg' + # path = 'styles/monalisa.jpg' + # path = 'styles/lesdemoisellesdavignon.jpg' + + + # load the data + img = image.load_img(path) + + # convert image to array and preprocess for vgg + x = image.img_to_array(img) + + # look at the image + # plt.imshow(x) + # plt.show() + + # make it (1, H, W, C) + x = np.expand_dims(x, axis=0) + + # preprocess into VGG expected format + x = preprocess_input(x) + + # we'll use this throughout the rest of the script + batch_shape = x.shape + shape = x.shape[1:] + + # let's take the first convolution at each block of convolutions + # to be our target outputs + # remember that you can print out the model summary if you want + vgg = VGG16_AvgPool(shape) + + # Note: need to select output at index 1, since outputs at + # index 0 correspond to the original vgg with maxpool + symbolic_conv_outputs = [ + layer.get_output_at(1) for layer in vgg.layers \ + if layer.name.endswith('conv1') + ] + + # pick the earlier layers for + # a more "localized" representation + # this is opposed to the content model + # where the later layers represent a more "global" structure + # symbolic_conv_outputs = symbolic_conv_outputs[:2] + + # make a big model that outputs multiple layers' outputs + multi_output_model = Model(vgg.input, symbolic_conv_outputs) + + # calculate the targets that are output at each layer + style_layers_outputs = [K.variable(y) for y in multi_output_model.predict(x)] + + # calculate the total style loss + loss = 0 + for symbolic, actual in zip(symbolic_conv_outputs, style_layers_outputs): + # gram_matrix() expects a (H, W, C) as input + loss += style_loss(symbolic[0], actual[0]) + + grads = K.gradients(loss, multi_output_model.input) + + # just like theano.function + get_loss_and_grads = K.function( + inputs=[multi_output_model.input], + outputs=[loss] + grads + ) + + + def get_loss_and_grads_wrapper(x_vec): + l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)]) + return l.astype(np.float64), g.flatten().astype(np.float64) + + + final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape) + plt.imshow(scale_img(final_img)) + plt.show() diff --git a/cnn_class2/style_transfer3.py b/cnn_class2/style_transfer3.py new file mode 100644 index 00000000..b9620e9d --- /dev/null +++ b/cnn_class2/style_transfer3.py @@ -0,0 +1,132 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +# In this script, we will focus on generating an image +# that attempts to match the content of one input image +# and the style of another input image. +# +# We accomplish this by balancing the content loss +# and style loss simultaneously. 
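+#
+# Concretely, the objective assembled below is roughly:
+#
+#   J(x) = mean( (C(x) - C(content))^2 )
+#          + sum_i  w_i * mean( (G(S_i(x)) - G(S_i(style)))^2 )
+#
+# where C(.) is a single VGG content layer, S_i(.) are the block-conv1 style
+# layers, w_i are the style_weights set below, and G(F) = F F^T / size(F) is
+# the Gram matrix of a feature map flattened to (C, H*W).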
+ +from keras.layers import Input, Lambda, Dense, Flatten +from keras.layers import AveragePooling2D, MaxPooling2D +from keras.layers.convolutional import Conv2D +from keras.models import Model, Sequential +from keras.applications.vgg16 import VGG16 +from keras.applications.vgg16 import preprocess_input +from keras.preprocessing import image +from skimage.transform import resize + +import keras.backend as K +import numpy as np +import matplotlib.pyplot as plt + +from style_transfer1 import VGG16_AvgPool, VGG16_AvgPool_CutOff, unpreprocess, scale_img +from style_transfer2 import gram_matrix, style_loss, minimize +from scipy.optimize import fmin_l_bfgs_b + + +# load the content image +def load_img_and_preprocess(path, shape=None): + img = image.load_img(path, target_size=shape) + + # convert image to array and preprocess for vgg + x = image.img_to_array(img) + x = np.expand_dims(x, axis=0) + x = preprocess_input(x) + + return x + + + +content_img = load_img_and_preprocess( + # '../large_files/caltech101/101_ObjectCategories/elephant/image_0002.jpg', + # 'batman.jpg', + 'content/sydney.jpg', + # (225, 300), +) + +# resize the style image +# since we don't care too much about warping it +h, w = content_img.shape[1:3] +style_img = load_img_and_preprocess( + # 'styles/starrynight.jpg', + # 'styles/flowercarrier.jpg', + # 'styles/monalisa.jpg', + 'styles/lesdemoisellesdavignon.jpg', + (h, w) +) + + +# we'll use this throughout the rest of the script +batch_shape = content_img.shape +shape = content_img.shape[1:] + + +# we want to make only 1 VGG here +# as you'll see later, the final model needs +# to have a common input +vgg = VGG16_AvgPool(shape) + + +# create the content model +# we only want 1 output +# remember you can call vgg.summary() to see a list of layers +# 1,2,4,5,7-9,11-13,15-17 +content_model = Model(vgg.input, vgg.layers[13].get_output_at(1)) +content_target = K.variable(content_model.predict(content_img)) + + +# create the style model +# we want multiple outputs +# we will take the same approach as in style_transfer2.py +symbolic_conv_outputs = [ + layer.get_output_at(1) for layer in vgg.layers \ + if layer.name.endswith('conv1') +] + +# make a big model that outputs multiple layers' outputs +style_model = Model(vgg.input, symbolic_conv_outputs) + +# calculate the targets that are output at each layer +style_layers_outputs = [K.variable(y) for y in style_model.predict(style_img)] + +# we will assume the weight of the content loss is 1 +# and only weight the style losses +style_weights = [0.2,0.4,0.3,0.5,0.2] + + + +# create the total loss which is the sum of content + style loss +loss = K.mean(K.square(content_model.output - content_target)) + +for w, symbolic, actual in zip(style_weights, symbolic_conv_outputs, style_layers_outputs): + # gram_matrix() expects a (H, W, C) as input + loss += w * style_loss(symbolic[0], actual[0]) + + +# once again, create the gradients and loss + grads function +# note: it doesn't matter which model's input you use +# they are both pointing to the same keras Input layer in memory +grads = K.gradients(loss, vgg.input) + +# just like theano.function +get_loss_and_grads = K.function( + inputs=[vgg.input], + outputs=[loss] + grads +) + + +def get_loss_and_grads_wrapper(x_vec): + l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)]) + return l.astype(np.float64), g.flatten().astype(np.float64) + + +final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape) +plt.imshow(scale_img(final_img)) +plt.show() diff --git 
a/cnn_class2/styles/flowercarrier.jpg b/cnn_class2/styles/flowercarrier.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..64f4937cdd93230df84002a4a0f9fbb0c3a84b93
GIT binary patch
literal 95471
[... base85-encoded JPEG data omitted ...]
zlD*8i%TTIDU$J+uoAWuWW%g>@1s*aEb2~`v*AKB4(aNeKM`aA#_yW7?-mGdcK(Lc3 zpSdXs%IO%h0k@hjwd?jcD;~Rm%i?c(?gt}iZt!qpoXdMH>DnuqL}IMRejo}H=WTLX z2H<4FQ`cY<;v5Kc!ph?^e-~kTKWEGKoQ`AV?6lz9MQ{YR@Y*=y@U$oi0LMV&Z(WAk zMsbErr&N6~BU9i!yxkPcM_&>GV`TE31LNe?_BL{m506j6giS`F*D(*~b+Bi<9^SUVlpbRa+|?xomr-A)^0(z6!%ZsUJ+ z);Ug}Jz}_m@<0sJw-h7x>IG_89fb+4G`4k;HWM;pI!JXM2$HV5BgPKncUpa;8<50y z6V>t|d(U9)k{h^hZB?M2(;yL5fN{vIG3*6S{{Twpv{&!M*ffkcf26lxiR!%@VO)3x zgTM6~e~7I*oz@=yWbQW)?<9e|03mxbhbORa6vw;<2g=Ubu%POr>67BZJr0=k<=StC5PwtLH z!x#pL@gMc*p0fJk5dcE|_IrNaCFWvG)!mO}>pvdJZno-{nDnLeY@dad^!#hPnY zdF9DOJ;*JUc(HjNULvG;_1{-Gn6s+MWby{-1^^v6c%WNjGze%!ed>EQ{oANpbWZKu zE~4!7b{ga2>Dg!sEwzQ)aY~ge$m)t3Pe3ZWdDO9~#lM|37UEpUb)M|2<8Kc)T6vr9 ztCGz1>{nQq7kzE<^|8xH*KO^lS0}te-rcB6ItDG|?iGD>?r_}-j z4>abc1@uZMvc@fjD=~<>p2qtxbS&WOFwXf8+v8+T7Z8LXnui3a9x4Z>u3;(rl8bHE zqUe|@qt$*G`7U_wyDmSh-ebD;skphtVDdypk|XY_k@6vApagB}q-WmoXg=KL34WS| z589g~;W<7#?KwP*{{XPIwOdPzH(2C~bPFQRgYQuhAE*j~0_wuK$j#tEkBR}VH13`7 z$Cz8CHxWl1AOOkAM^m@zv2Y?vm#2SV=L6nI!hk{2cv@UlrADl^#x&vO%3xE55aB zZ}WaGz`(z@zb$^Er}eFrvVZ;Y)6R}?rdkemmwsR+(_q~?N;|Bpe$-m`=2_#qEIntsT~yeSjS|VOL329Sy{rohM`aq zP#LMUMwHBy7{5if>=auGL`O$P*OK-ru~!jY-OY7nBmUksEX)^;4GRAND5(@WuZ2*A z!SN2YHpcnWD4jtm$Ix;a{=02!dbv4`wmBKW|BrAApoc!Q>!Uob*Gd= zCGm0j==&m^b>d{C$YnD*L7KY7a#>R0SCA3_Da4jvwShGoZ?9q1U5)O2K>U@hZC09U zv1WcW#mpeeTH8Y`?T@-v!`v|u%+W*}`awwhywhq8zPgqDx*~j{yL~Z~bOfIzb^aXr zLe*6)b1=_sXe?lkF>n1d$R{RhA6kGs`g&@H%Iu#YcG)YNkH`svG*~lzlFzv2XU0Vb zy^M4Q?yT>~CV^xbh}kFl7Bf5Dy9P4REbre6WHV;XlC+KO z0$q?W6h@Hkw&J?1?iR*@BfO5@A2pQ8&NB>T+N-ye`Fpbbd5wn;)BZX(78dFeEj^gy zlvjFyTXDaWxz<-7Cd(YkQ4&8NyG5GGs&bK#B#*&X{NH%)UnpfKOvUs)R~a19!;{Kg zC%mvMwEz{4h_$Dw^wp+5C0NJYVL2Zjiocxf(qB*G@JARM9Ckk&VRzPW(PcUx#@FKK zH7)zbw;8OZ*ChSK(YX%D0pgV$d?3&-jHf0$&+H7Cg|Q3((mg*iA-liLUySqW+?H*me1+Ywow( zC+c?}F?JrnZSvNW73y$ZsV^to+^z!srB(j`lu!_)dhM$rjZ`8b4T%m?(8%=>FnpGN zl$}+uU4!Ia`354>D?NcW_^UvDq-R*lx-{8*z~9xg4Y1O$+>$lDuhd@$#>=@5U4)5ZXz^k|Iyk2&HO&9Un6uH&i1( z{a|nXmm^^h5xcFd#yaB9DAv8kd8|8DV$Cml=5Glvy4KPnkZoQ!3Col2)f6CYq^Pu6 zSQP18NfHw4_BxQS+b=yyQ9a+NXF3i8tatma!*P4vuWr@|v6rY8DdKdbquM-*Dw=)O zCY7y5+U;3*_fB~5h>db?L~aygIYrx99sUYyBYD;5=o0*8)P<*E?%j&w<2BiCEaUbu z_DwkZ(HV~*s;U)zNhHt>H5%-)eB>km&^v7%0Xp^FDQS^BKm-n|GZ&V+9V@5VP1mBf z=ytbL8w@lA8S)peSpDW8k8pTmwg7;b}QlK^PIsP4O&8*BGVB1A#^j5gqx{{V=MM;jtGPG^Yr0o%yY0Na6S`-!wv*g;o94vzn<;Hd!f}(R8-wLnV0G-~LDQ3734TdVvV@+V=FwpzlL z-5;&?OQtedz^NucS)zoBCuJVo1d6kPQ&M&ZRabZHI$BbfD>#>JxpYvm=3An+9~S=r zj9nMw$!szv!aMBIbt`>32{@3y${)x2Q&f4~_b0d+M9+jzhUk%;YuP(4aH(fyHMH3I zZtaYo+A@VTk++wre1{x8Vr|Ugwe^N**LHsXl|q)#fCn z!%f0Mag#b9hvYF7a$VcUB+l+=KTI+O{{Yhjfm)sa0MkV`&vJx$9h9Zi@@0fMOPFMU zl>Y#uv<|Aovhrp;NT4*{f|_T6Fy|wxnB)fPQyFJs(IX^ZvUXqTfK{1TZpBx6ZM{yV z2*;}MC1$a;g6{pDrF*0wXSpI(MJZNdNLnAJrU-OaByNmm#K&xd`yIrYB97l_h&*8m zmZeA)q3{4dp1ouNLWSVykzMN>6`iBVjG}NQ=L?nQc>Uhwi$E&h%R#E#fFd1DjDn-t zn5F*!_|Mc^+yY;gADfQ;1cLEf7UD~mDhw3B=hPetj-mNH$#1k+w!tv$HM=v{*l#Ci`W(@OUouTwh& zdcaDq^<`y>%znZ4N=U=fX$I~ZszaN)W#a{g{O}5NhoK!bgmdhM&jD!G6~N}x)CTGZ3V ztz`&Bk5ah{L79w>pabN#ruklc!rS%Et9=xb*%p#)qpcFjO0+Xc54-x@s{o_x_j+kO zyyF4s=$wvU?k$8VE#jCs3(JdpY3%OumlqLY!bx(d7N#OaTiiqTpZJ9}l>P0dN3N?= zZ(*u11QV#$)tKtPH2ihfy6eSZWXC0UZY{0s?ZZnnh9Fg4%L)SQc?xM&i#%%i@aUk#A`8v_A6Lhj}f~Ne@$EJeRb^2!%Rr#6O#~pg#8G-oyEG%pySkV*Pimgf(EYek&(xj2TvVzMe$j(8$bm5IyF<+zZsupvMmpKWz3Z@TJ)xU`>d z1qoGNIXoPZv#EUD{y!_+Bl$}u)GhBZSeZh%MUB0ixUA6tg63$)9%8H*DD?e1?WQs- zx7+sg65p3bK&mWFuj=qc-Pb<-x^AUscZ-{-bbzLK?(P7NN#iihnu-q(}C2TH+YN1M;*GgwR)bINr1 zZMvPUxyRtJ2sqq4jGPwY7?+4rpwTz3REf2Z|gZOwS?(dT+?Iv{8tG5qqx1hTtytAgtE2MM0q>h$^xD1zl{qMx5_q3 z`(4h%tUd^)_bt#UjOSYIUG8i(?rK|p!IZwVd~Oe>M;U+EV;X@jWL`>Pox<{|ASnAz 
z_0eIG3~2pYoyqE$^J|2@~)SY?)s0DbW^s@L=)3xYdXKhIJ_*fG#EQeRV7XB#6$Y`KG>)Ar$5w? zfH$e9(^!w`&E>O}tgR!pv$t9Kko+SAJzl)L?d1CC7qqr1nrwAZK7aUvd%Syh{-qk)cAmZu?&aAQ z?hwJ6&T;<$19(&XjYb<+TA$fV0(e+@D_ zg=bAt*3exLV|z`azZ>g>Hpu`H-!%M-4@mg|PdijsQ~mUObVK%vGR9}{_tpWdqn#v> zv{#Y9fXK>1@?ge>hJ(o(Jvp(SfD(pc#WJy2ORKcETPr8Cu_apGS(c$xea!7zAtt^7 zH2UfINRi;U#GfQ}e70DKBzUi;kjWEAJf=shlj$0$19Rg<;!?(@Ix4;LUQP>Zd2#Ik zzPXWNkjNU1A1^BAwPvr8>7;TbHc!>`vZl9paLMzAFg#=W9eDdO?Z`bQCvUIc>!l|A zSAv}{?AS?nl%H~bJ(Ow`8zM7!zmYZiM)J0gb z6T9#5bzBxg->fpUnmCKA8xBkNDutk7>b2mfj@Ed(c^>f&y@6Bl%Hi;r?>(s zE~Jt27nia~_cBZyle~a^?0rp6v`~W&ZM`8x{XS|}t}WEg*3l;^c^H*fWA_>rsrq<2 zTPuuOM|HIRH#tf?TSYx*ki5pc)+${QosmU$<=}x&rjwO1v7}a;1G-&6mN+^Rifyu) zZoO}7bsfC6(6wW|xeLiaCV|?7kVlQj@zPgOk9=e$sS4EPW?`7gnkTB$zBYGjo$NQr z*F~g67S2nBR|F}j<5oVD8*1|tAB9{%(PX=}MONflb()!%Zw`w{!uN4NmePnVW(^@x z$w6J}Lbmh-Qomhx%SA!YL^|+glQ1hRf2|QNUmTcRe_opm1;%Fb;lFuc7nMS=`(z`| zz<&d(x~voAWOP=!7-pK_aiT?p>JuF1`)#*fzkp&Q#a%-bX?Jzl0oWSt zr2wik(^YzI&$7NU4aJ>3{xj>5-{37J78Y^#xNd#>dCaRE)6`X{?Z{Jyj{|e4WLFt_ zVn8Z*cZXx@^YcbGza@5F%^V#|3D>5|-Pyw;PA$0rTXa(Dr*TiOpIstkCYZs~rP3nke3ya_mZI<6%6!19kKA zRj|mNgvaR}U$2sq=^b0C-emg)jrLN;_pI4V9mZQZc2DlE?jS-LB6MZ`mc>H&^ZDzx z&dUIq0zLq3_9;TRRCGEioJ2T0HQrym<}p{eZ;7*)l26z0whXUg&t&SVB_r;MUDuEX zf$9FEjNx>|BIeCrK0o$Avq^ICTitiEV|(9g&3u!P!^x1xU}23ddex)<0Mxab{{U3Z zVjry*i$y=fT%D1OBMBEIoqBu}Ayk9N?a(Ps&Dt*hL(BEs+|1{ubco`TTo3mfej@T? zAGZ=rwW3G&mL#7|yi;8@OBx1H9EtE;u0G+^w}4xj?2mE2z~gg0f+!o~{A}?<7AD!m zBJvdbRhNigv+dYz%Rm*z!O9v~&V0O+bjCH$Q}SN=_fo_5#>{pe+1WBzT^h;5V{jL9 z#)jm@LaQ?hKenInlTs;Ds8@KP#!()!iGL6YJDrw}6EBD883=pVVYkRy^o)i|&Cxs2D70d-@va+8_*23@>AJw`?IZP@!gZZTgiRcK0{ts zHaK|-USb^q_EHsUsn~$^ttt0t6^L5{UW^B_`KUJl`-8P z5y13*mzA)|oN$*TpUSrsifJGsK@4CDeZ*}~1AiLkD6*n$xE@F8*Xj89qY3l$*XaYt zUy35^6L!1qz0mKoIQVVsa+guVgX*wO_Rh-6Rc?LtuLfF|284M(O-lwe(e77K^&L!) zSJej~O3ze|gJIyjb}p4Pk@W0$QqSdkM|z&&C(L#Xe7@|M`*1$`Fe?;(=Nc-v{8c_R z7)}7o-W(|!jdhXn^3^Y-;^*r-s}t4!1kZLnHI@soLko`iKhfHmhScF zUP9ZR3ZR|qsyxr+L>WqlGCq2If{k^UjZUiBbnk|G_5#foJ2~WC9wgg8j1+M@C(|wCI1!?A*W<*gR(*U7YBPk(S zE$%Mcn@Xn9xpQ?aYfZO4yXwF8^S{+*-U*R09hcBqt&|?;Hm|!{0YYo%_-oLznofa5 zKI|}~*@X+qa5>kak?xa`=Y+l|OjEx-)u^*&R`qa{=eE?)~ z97d`_wo&;z+l+f#%>4ME3{?jwElP2%e;Vq>6~~LR_a@m3WEUHh$KJtr2NVDx6cGVUH#O}CV^HI9!e=X94GI zD90lTAnrP#Uo*|oCsMWTU8N$O+aHdCMU2HOO784{(ShO$iUaCFKD%kp+ttaP2FD>; zZng2hOvbENdHdVUtmt{HN0M8Os0S4yyN?^!R+Zz`2)R|}TC%jAVBN11Ax#=H za)TqHg6=uLbpF>HY_LViOg9p}9jU!Xk_`un4N7v+POuqM4&!<*GPlveJcXcxQ%X2gsaM|N3hU@+|YkejfbwgA*d)lSO3K!WshSR=+Iuz zZTn@Fg2@Q?3LW;M)thz$Wz$vPDNZ2uLg%VXXKXRH_Eoi&ZX)(qh=E+09RTJ-vWhCQ zmZ&}m)vb9FGIVZecdBM>@zN?>!|n--nB)VIR-po;`1Sh#N?odeLki5aCv-aP0Qjxpj@tu}*cm{XU%uh@a?azqi{Uk61*T&;e z$u-*&#lp74y4c8DPegU`?aNEpuTv=tLol^=wTfpkg=cPj$!ZD#O;Yz5#ufT@(P}## zmR)t8_oiQ$`=hCI*=%E8V-@lFD@)s!xrXj`Vr|MGM^MCmgX9VwD*;@qc<58tU3FOm z0^&fTPK!Tv>|MBFI-CuC{kAB|T`j1$iq#Yb_PxexQHxi7{+fnWl~NvBPok=Yvd1S+ zPX7Qtiq+!c?;i`|ZtiUHcK5fr0q$R2!ElCGkkFMaBXXep4zo9z=TD!yVtkcOeq3*t zD?LRd^Zx)Ve2L!j*&KECs$o{v&Rg60ZzolX+Sv$D$BQv%Vn9S%yB`{~qqf448QEc$ zy>(gpZRALYq$2+SiaIw~$#gT>`-Ri>95y|#ZZa7NG7tNl(=g&8lG-Z!NP$QL_44|9 zi#tA0wV=`svp3Dup%cSW>&HyF>V1yd41dT5&kKO)68p9`cagQON_>2kmeL8MUu%*D zJer`EI_-edYYt%(~qs7XW#ogg9G81L{?3t_&!yUTp-u3RNgh6AGiplCt!zerz znRy6>DhG`zlX&;JV~4zcZknd)tCV?5

$*UlZECN1x?6I@U|Bd{Ji^!^0*ElBDYp zsQu84mi7oVuO27#*F~%v={2x3I=6x9p=_Li;El4`*}hBc)_BgBTi zx3<|w@%}dSOO5I>{r8U~AxtHezf3^=$|+N~pw+DFiF+d!kq$^nJcnPB9JkNOE=xJ` zhBnsQD@&C_*VutIvO<5QI9FhdK1+=2Q4L_e43o z-ghOQOwWJCT^!DS)Aqd^6OEN)mdEWZCgjJ8mQ_2QyoyxokggR#!}5=+UI}+1MDG0* zRrU^kkz;DvUch?~ZS3la+HUec^-#xZUVu1Sg-GlAM#P#O^h#&SVAmFecGv3jMY#pl{HRY{}aJ0rirW+_@G~}%z0GbWF`1I8&?*7bLu+RCA`Y6b- zXLWOWzsCJTtz+PV#}OG=kStOpLKp_#iURWd&Z=^)<~dsPC(VHQAOIgg#T={{yChjB z1>Hv|kLg9Hpix~0yYcC%__-*22|KM@%!AJw`v}&tqaSugry>si4}qXvmTiCoK+B?$ z*Hx^?9d8J^os5%_v8g|+Z`3R2@YIG>X@hP`_qJa}oA(yhuWK1&AHJZfC@o5T0I1(m zvoDNTDSIJ$znakyFt-mWBZ_h4!l&@46w)~va+GwB+=*Rn)j{0BcY5E{0?OF#WPRDV z_#1j{>8^}vPA?+My!iR3n6ccLLk#kll(wPq5`#2dAdKoBV3tRTl6=wznvY zqvd(o<1Mjjkx62!w415qIPyM*p!m~!Pt5{a7FH7ixptie^vw&vqD2=9UZ02+pxgBA zU0GvOJ2jtFyrIOpb#(}CS_pAg{^iQI^=>_+B?)6%JdR1*>0jZi%xa?$+obI;vLlWc zONolKje8IZRQ=*Q02K%o1M9w+;KxJ`E`@tmLfRubPyYb>#uO-c7ToOA9*5IWwoD3x zk)TxTyyhW+-gEB36fW~|hyqH)tj%4`Hx)XivQH%BK9xcL0KH6dM=R}~Yj!V2!}a@> zAUD^4I%x6JRSr6@|JSNGY%TrVS2;|Dya^MPiYAb%l1lhsD@u6>>8!P~9SSV2h(fmy z1lbP6lgQW2I;{5gmgIX#BL&$Idao)9lS5KZjX4rp8f>E-RbK2l*)Bsy$f?apfW$b& zA$I0?icxW{>riy7gD>LAhv9GIffsb2t-{B&Ckq&yit+xUw;TRLxbTt-hN zn8(=QgQi0v8x)vG(MG5RUERR&0I%n&?5wC`$CaZ_o@-N)WFr@?UOt2Hk6g938LX@e z+q0+MoHQ(q%u<_J+ha zB56ZSskj4F82#LGJc%dbovxT!q1dHu&&Rla@-H!W$}Ia;ikb15i*Oa$7Mzlc`iiv+ zDe^ZPjcBUxc4M@i;nhYacvJMBNh+rH>vplp-N!*{p^6`ZY|*;TEiKv)5*=K0xtC;mD^T0 zwc^Ge;DL|C@9@!QbBv#lK3^oq={-y1%PekdxaG2<{{Y)-vxB~!{mJE#4-}e8eW8>k zPyw|@jb-w*h8zch-{Rg09!3C`b?VSyay>sI-}~-n+RGx*`s`88d)v$L_g}FR;qT)#hT>v_gg_6ql?U2DG$&m@wywc5U#drE`_lF0TO!2h z7EXf~U+tK@UuyXh*8ynQ3x8%QVOqSV*c3@5h|mV06jNGmbe3jClo>{+pDnaz`KFvq za+c#{Ba=3lxy+}?mMk$>ShEnqV6rh7?eZ3puG2a%5EwD#88rf*j-0C@penOc&h7U) zTE~>U6xUtH_D=DWxy~*!Zr`5BB1dPIiC)sf8WMmDXKHh*iV_Enhmv%q*HMiYk!T(D zb1yF?<69n1te-;v02D7S?j+0iJhZnmU#c6MLhP`(TK%pzE;s!o@%%NP%Kk(@_0kMS zLZirmsda98zenmFZaA$l__e#bu~4Y2fg)0NqM#drsxxQ1E>k8_`#$9fT|O<*F)uC4 z+r%!e6(S@U(m%CUn~Eocr!pfsL~=i3UJ@>~Cdx|?Ht{dIknttkb6=;OzPhWoB2akl z6ETNXsxnJIzY0ert`$QQ`WJnTO?Ns_WLY4=NLQ<@5o6OMa}>_ft8$~O7E}})fxQp$ z)MIm&Iv%Qgo1&hQJYGI06Uc74FZ__T{41s`$fglsOo{|$BQ`@a<=~n%HgPd{Pa3fO zBkB2lbfjcw>P$)v)n~NK&K2I~-}f$y$U>?}`Vs>9(^%JE5=@!C2y%}vq^V{7-bak{ z*3Q?@379FM--@WI@up)r-4^_GP;NeJJ&gDtb(SmQ(nPK7u^^N#wW!#i2D|In;;0uY zGK9(tp3@l~Ry&o(%aUMA8;3l}Y7k0+_0SG<`X)F85!)xfyScVl@oQ@mmNCc7EQ5lI z6{oJFMf18&eJPsaBj&j0{#jgG-~5jrBGK2~hu^-wJez~u6i7`}S98GbJd>}oe-o_N zK#|qbITLvyo`rLiv^En%1i!eKj8i$}DI7=zS9&kpDf~5eZ6}u@qFz%3s~v~W<*ww3 zqMWQON~Gjd?k1H@{^9}Lld0D$0UDsnPrixkW01JPSVa8&Mj;Q_05SG({&OxBSa~N(b6a6Z00Kg z4ddqXR+iICC$!N?I;d4s$x1U2SpGt~tjxMDbws8wwyK8wq2KYD*)MV3dnC68;vd_k z-M=M-q_hkRI;o%lgF(KlL?&e9XvoK84>w+rS=wE-oUvP*CgA`@{;a2vsixeOSCtRo zJn0|YdYScfE?Teu(5gK~>k(@vTO*Ub3ob-7D%UdBC11Hxfb0zbA6;dv2a|C^oF8P8 z?|BJsZ*T80*G9(Fn~Pg$pe@DY<~g?^vj$^dr8XWlZ-_*LGH$r7FN!`6?LRhlG{Ruq zLt|$ewsSQ)lNGlr@lc?G%xSP6#+v7M@0Ir2-0MFDWko*W)G+Z~H|ihYZ^QNw*<-Uf zdz}7CL@9TYo>u1X(GcC07JYS-<5P%+#;T2>*!mg**(s}w^)8M`I z3n3Fb3RDoZe{o9Jas@Q3{{WX{NP%~#1=+|<2#pX_-x_yck1oG|?OS$BJ4Y_^K?g3v`dY@j9d@^JXnQgT75$r z*G^++M#_z6S5MtgwxjS>)BL{b_E`S_kgy$!+SwtB_RL1kzi&TjgDq57a2s2{;aO9b zJ(Rog*;QjkG60Z-tut6`#iBdq^Hwu31>=pqxKvYeHsebA>6_@BZF2`!FfwmckcGwE z*^In6KEK*Bron#v&Pwmv5Wy0HTrDeB6!q1eF!td0Q~~haA$9TufKvyi;qkB2@_CG| zVBzsscGBBQZKkBx$rO%OTcE(OW;6vW$7&kaNatkERtQ2aJ{x(DG_6}aiju$n(}}=b zX74VVKG**MO&y*p(m!TM=Ztb3?Awr%UBGIf0Y!fuLa5titpVt5M`=~mmI%ex;;@BMxCFA+$>iUCvP>Z35&YyzT5ncJXE z#$*8fP;XnvVQeS9%XT-pxA^BHn3D6WV4f~kmx*q{nS-Cx7cfr&N{<_8`E*@mL|_23 zuDc?`gh4XvvXsi+!SbhMzV6cFAULkEX0})^tmN8UUE5H}Ez&CSU|f%2~Tyhg#^C$kD+RLhfaF@y{?NP0Z>RjzXvb5~TTRYFRf^ 
zhgTWcAK~{d@*_evS0AT;jUM&=wrk{k%l8bbAQrY_AeJ_u0C+`j_b3DE+>Ww&f61dC z)1+~U->aV$<7he!Rk_Atu>Ee@+8av?co9`nB$a;EZvu#$lm7toO=lxG+ahF1_@S8{ zk#teMPlQTBK><9N?cfhV;C@3yuPRqag1`?Y&Mm6>u>~%Pwq|I-4LOkUEmAg9=7UOG zWW?NktF8!J&jf@>(1^^|+k5gg6#*2j8oPhKjXa6i`maU+EHT9t&ee@3P%R{o!gFVvIk zs_N_aSn@82?4z3c5g0^)J7sScWEpQV`+^FZJNw`aF!;U&h+I^!GG}6Sre{djzs@$4Nn(s}ydDV1;W%PwUMMzzC8yi^6wqLu=-c$vO))G}}X}hosO~9zz zM3-!k>bNe9f?anm)B6}EzrVCwJ6M)Sc6JKTgk69Ox!l&epY2_v52?a65+FR61wKyg zQ1?HUyB>9Kq84&pG*+|9rBPvs@ef6*-1q~1eb2Vy{?`r=KT!!IC{BB={{Z{r0gYT) zgD@IPVeLJ*Q^VV!DokqCEy$nMQ~Y%yUDi5@5CxNe1M*#z)>2#GZS#3)&nJnNF>MdH zX~Tm^!-|3Ah0>Q;Y0`3Hi(VzR=@_F?@~3=Qt)RqM;kw${s)?_zx#X_okvHVbMRdNQC-XCzmTWEJ-y#*o>zFsD|;l4$sUxWl!Og|*C_5eIWzi< zVx5m;$EGerkg0ZGDLv)X*?ZaLyS$W@vW{sQRx+^tPsp8+ZgqE5OoqDou4GeAEIARc zM9}#AquUD@bNBN5_eiTOHXti22?^rl@8XpwNOv6R@(_-JW#u5{x~5!<175+&kL}}P zu|rc%EUZk1qLnnDA4>WP>aalZC+|x0^)LU?q>GQRwZ~lAyvuTBiC!B~xQpF#%s7yK zhPyngMgin5SF+b-}}sx4$VDx3FX^Xg>hM>AsU0UZMub^gx2^pIze=xZc6i`$de6Ws$_@MWxJx z^*yYhiDRgsBWjB4dD^lkm5FsSE;#*)(=~yIiAT9uCcL!1$j*OnX?-GGTEzRYk(>Z` z+|X8<&C$X)L=O}ABg}hs5~|1fZ+U4o&rpU;g4#}yNKBlHpKVn^KWHTGb@VUc6wDi> zHqn(Wa}|y*sN)fTe6UA-b#rQfDEAqcpRG#PioZ(lucL8jdV&<}?xF`BcSZ1C;i1P~=W|x(>oDoV4D$p-BdZmK z#0qLV%k%-Gmf|n~l1PE$F)V7K z1qs~x=?iPLN-&7!7u@cehqp1z9eg?_XIH<&_MApEk55}{FlAh%TgDMd%R7~Um=gpWZSF!WZ5Y5Se~JA5UWP3?jVJv zuL#Mh0crT_w$&*IgndVIs$5%B`i`mxwRaqMW9%v1Z!r&N(&Kfr#M{Ef9^p3(Jt%}P z5Lksf`f7FJNJE5ml($kD02gH`X5HK7PQ_<+@%L0&@tF9*uNIlV=ExY- zgcKi+gJ7cq1l=*_0d?tKY%Z`jz}j2cW1zRPw}=51rK+e}p960+)-x{aFFKd=Mu;bM zI9DMQOl{+FS_uyUzTO3V0r+YWg@8Km0n$mebm4SjiF`*Jj&5pde$*N!M{kIbLP}7+oIX?i*0-I2nlM#!&{BL(9tvZ#qj-13pnCg0MV z9do!{Wuwp+C;)X!{F>ie#H~MKiyZ?Fe33ob8ox>}pQS0Pym$Z@rgn-KS7hSg@qFt1a&uv&yjz1`M9!J4!%kp<#u(5{wA)U0hwpAauRB&B@-l8=J z%_+Y6Uh|R9j6o=ngj-cXKg*x;V%Fl$^Q!e})@bA`E^;x*Kq^2|5j@drKu?ojO>Do2 ze;t{$-*fBHv{Aa~aNssvrCV0W@rNHA(%eW-yjb!Cll7+GU42GV5Fp460t{$2Fn~@z zm+{nA(%RhLu)Di(54Gok{6#kZ08LQ$9_rzs!DxGbYvZi#zJx_%(iRMb{1o7;%(jH}90Nao44K?Br}L9bU)l-eN@G9+q{ zotHUnaVy?jGg@L%0FHnMnSiRI0lQR?K7(3p9CHY<8kJp_b#*+au;_qsFi)J$T9$=j z7Y*#%Th@xAgs3h4?0t22bK}(etX57@$eEGYY^`<^1&)Ir@BPYKX)tjjTxAuOIE~Aw z;Sz@OiWGzO{yNIa^Enp^uG=+W5WVMnOow>IXFqPM)vz)>sJaTi)7;MlkTj)#;0+)H z_?=qE7mX&eeRA97st|`&|I((}tXg5`x4oVzB_rKY6+ZI8SP)0lG4eiRT zkeR+ohbw(*%D=L`{guNh3GT=3p`kleD<2vkd^~XkBhg>>VH0nf>eo~36TerF4~?|_ z=2BiU`zn9*1!!wpRO{inbaBr2CG=W_trmyo8w)E^qn zcWi?^iFhSNQG{IXjOVz8ZOX*HZ z#*90xADS#aE)nyGJK+(q)ne_}cJ~<^W)jX7NF!l+G=p*=hj12~D5%@lf5S`m++ZSR zb(XF3M9i_P;;QCz;SSIKMRyr8`0Ro~i?M&%+ND>CP}Ft=ZV0ES*Vg|4j>Gm?F&~)j z>nl8%xdlG(-}`CV8)Tm6xZOnTDJ8TjDp-w(W?jeRHFXTDEDTgXd?T<^xgY(yZ6a0u zhjYA*sv0+y5#0!1N+=ZkG;d|YjnGeXW2n(1dw2Lb@(v11o6L4z*E0$a>9xW#%){{F zSwPa0(i4EAkE%_IyTaka&PD;WEhBdiBWZVeUHM799D+ZT}&mr$Xv(9Lg8pYcB>9F`%kBVqt{R~-o( z@r^(O`0F1$%A`SaIUU^f`65QP-A=$AvGUuG`Oi>bLI47!eZ+657nDyU@Cr-;S`k7a zj7ai+-GKs?sHdMKYPEna=+3a5mG#4{NJ85IM4+!B>PY-_Sj^ZI$Y7+cF|hrz-6@c# z-5%%dJ5cfQ=j*290NRuTBI|zGEO!ch_jyr`86WOod~&CeO|&SOgXW}51wF;30Nku` ztU`($)Bt|~Y5uxlxqW0ZS66VMvN_y+jFHDA_QoMzaytN`gUR&K`H6a90+$!mpd7c$ zxV&NgycW`ASe6hayQoKCKS3b*)Zq(x02IBq2m)`HJ1$4;##!6oViEFbTp1lV{C6aN zT58N09cMMc7Eu;?Y_#*4b9oEQzwSU-i>C;A5=lH7n+?4+14IC52R%w4A(VuFy1Td% z+heXSVMZ$gN68`!S`y1t1e2xciR7hW0BVPOS8D8-(AE%L*w`yA9@XSlkPrT-;U|yb zUG;H90o)Y1gYJYEvE>-DT-{t;BZ2Q5NXi!c132SP;B`Bv2#+-w(n8U?hq7g@2@bgX zH3<~SEYB-9CaFB+1C0lu(lpsu<<&9ij_5-ndx*j>x7a%+m8L2NZf|uu!kkNTa5=Cn z$M6K|#EF6Wa56qiv6M;IWmLXY{{SE`cWHgrzALx0$wM-|wE25@ib%nM4IC)qqnE6n zX}Hz@0B8O=ky-chq+=7{-`z@5HkPktP-OV~J)OKc9P9IW*b`$9l#FZ;d7xeQI@(LP zQ$#pc$%;E{raeJ5I?qI~w|i!S0UAQ9L}E5I-nF2vs51MM=CnANaj&9cJeb=Og{4Ss 
z)xEUhN8D6>#Gk{)ss(emXHHK~Bn{a6zB(jpD;bt)oTx;JYQ%b)oxMGEZ;RWdyt*Xp z$&5BB4cL1#q^xf)T1i8b1%P10({H)G596&K9t=joR^@k5UtM}2BRttlv3YXUC^kZB z5CgKHKDxDqA{`cIDSw|Es4Q~nCE;Tgnml2t%Bs!he@{`bk!NDV_d358*T}7|M2U+9XE7Ns z?<;)PlE(@e9GKCHvoUkSlGkBPV>0n0F(LtRqE0q&0y0DfP|-i!BlZ@oOAobMVrnZ> zQ|qjhBr3U+HbLR(L7AMby^W35M)GyF#o4Krd&hbN;wo9G=}$D(#6{C+UyMnnf4T`E#+rT6+Nu>vPUNWLR{sF{b*b!qv|Bz>`XI>* z$-c&p%5(e%zR6{>_jm>@U#(oc^RseeEH@jYKH{ZVGS~3d%eZNDM05mx3ZE|<+LnWL z+N(RLlGRpN4N-QjDk-<7tOSP;I;x;|(P5G#xpZ|S?f`Hb`P5=%%R-?zgV9D@VD0Bp zlf(BN(Z`ei`ewRJj->`r9TMj)@pcO8X${qYpWT0RitGl!SN{N(y=E*1vJO0yg^~k8 zs^82%1N2;u9`@rWi?@WSA}fVeUJiUQAOHsYoiA<}j7*^5j}+Be(}`la{Qm&rzFRMp zSb7ZeX%$SnIW1wTX0w4z}AE`RR_dn)27s$sHae?Z0KYC^rH`K1K1Fj|-*51z@hl~@=YP$M<}tk+Od!LRY#g?5*ABB zbnxi9SS*Gp9w{S6xRulvXwr~`1p2;d$RR~<#oe-ts8IY(-Opp<@Yf8zVSbs=4o z-FrnDZ14mISCZ#vb-B6ORYBYT03S^;`&J8vWEHn>EtSHlw7FUCeX&-IP%8c&Nfi9G z>oEjF#dqG*Rk+udgvD!dbuG-uNA!`nuKxg9ep-RxLl<6&JB6t=_EnpOsz;BFNaH+a zrH7zm-UWXVuL$ze1MOaxyQLOeGoQSYYZI82kyW4CSwmEVx#Jh6{-;gVjToO)T}6OU zClPm-zk=5@eKUsppn;G_9>ZcmB%1wcrz0A{EE~acQUOZb+y4M;4R3ic6Ip;`u#Es! z4d;3RUtRSXW19}-Ae$5y?_;xQ=X)fLCH)dP>Ga=YBGF_;~ z%#mBhdC791xfJjzLJ#`ssAGW=29Nfrn^b`9lU`fHYHZ$W)7_qFS_CXNYATRTeuGQo zQ(Z%dmnk0w&v45`p6=f(e09^LST8d;^vg)bm|R2xd$}YVa4|$0LVl%=tT6F%t^iUC z{{Xwv6lNtc;1Z(G=j`Wl$j^%DJw8}%?IHm&2lCWu+y4NL3dgyT zV|fA*>#oTM8zm-l5N8ILvZ(Uqt_TgMX zWQ_ZmmVpl3WpCwwYO7?7ITV0V&3y}<_x01#rXXmNl;<*eFSD407&!8gDN`N9Ya<#S zT!!^NY54th?0epPc2Vva!`lS~!6PYeZHg8IFs(Ui%0op-5?F=4>Qurbjfi;v08<*B zsb8T2x_~MqI&b`O*k!=Pjv$PSqGef~k#`1w)#*S7f%xyLPUT;?bk#t|#dw>y+E3i< z?n1>DKE`K5eeSmjZl0)S<;VV5b`%t)E5EM03`WM3TT;p_8JGXmsPoB;>@#F9FG)*u zx0P|qUyE|$2k9V@UC#BUveGi7utJ2$-B(P#zqdT7vx5>&eS(ZYB}ody@;g`X)#Vce zCg@;C#dJ^Nj>PYU>RpBjT$_tH7V)dd`;mW6T7$7Q1bElYyS6_Ir54ba`Tqc-+rob3 zK!s~MPI}?yyuKF+97c9w6m%e&C{z*rPO=%)i1RI}*F+&E(5Aa&T@15M(THSEbSV2{ z;=_7+05$WRyi#&nI_RyS0EpF94&mP?`9C(@PYMmcPsYwJGdJ;8mqsH>a`C_#P=7Js z&}-?vkFROs_b$TJN5y~wExZ?{ zKD4bhEDU>Jl(n+Mr;>+on*k7Z*--xTo5^S9$acG^m0+FH*BK2;nC#W4rGW9v%?FLO z^lTd8Ttxx&*XQP?iA1Y`C#pC901rD2#xLgytycZg-!q7sH+3{$aw#Kty+sGyN8w#q z_d3C<6YJ!vd!Zn@?>-v%U1QKj&GtkVX&tI-Q{d~=SO@?t^ofcm$Ib=E0?K1h7BD|m zbvkA0a-e2b-b*WKVQF!7aPeD13f4=Rq*&2ae3HyS+fVzUv>K?H2Gp`dAiB%&-9zNf|ej1@Ta=X`Qe~NRP1`1hnH(1p6MDa#Y54EbySLitGKRpej65n@4 z!!lGvoxj^@5XUSOkOxCf4xoR>vij(g$1Ll@WGsiZO+1VHJ3}K${{SXppzaR)3g{vo z3L!4Fp6fG_KGo#j#9*;_!9r<5K`ISD4fIH3wyTb{nFQOcKw1wn}7E?4;h+N1S)<*ti74uL2n(2FEIq4F?6b}?Ma+zh2+9a)x zv)KS&wb<8UKn1D%0sM3*m{4BA<(GY_OHhd|dj3Y=l;+|{?yr-BPtE?ss6G_X8hs9v zySPMa5E&9RFw~+MFjhG0HM_aKH+E>n#kJFvPq=SYsXj)Sr!cd|ETLpTL0PWL*5GwZ zBw08_$s|#M3bRtR*i?XY)iz3Fa+j3I*)DE7W=8H-TL_+F_25iQt5L8V_B1+wU<|Y$ z)6FS0)&bUc0ZnANx) zk1-MtawrSjN?nOyzim)>_58HubY$2;5iyC_kl%MjC|4;{Fu$oVEe=n@%lh#B#`Hhp+_PEy`QYY;} zc-vSyjymuD+v~7h=WbzahJU)+sJOAmW9D!SsO){&1Zc*JKIb5x!&Wz0JdG@tk<)?P zxA>9J^h6>thY_;Fob2~qKd*K?XF$wZfZAyHVNd0M2umZ05< zJ~eW3Bir?yfjt-}uzP&+EFB_>a-EM2(CjVoJ$~jnl_@o2$f!7_v&;{E>Z-BwF(z7hxkYSl5@HDosz1U0&DPp&i1_+1$;)Nkfz7bNOa= zlKxduMWk&)4FH`$V!t-u&->}>=%S&rOh(&^!6#q8aSZdtEZ+KZiKZp1zv}waUO5mO zBY($F%}DhyuYqNo951Qy)Xl7G`cqaH>^Q`tbuM|B*w2lf(qHH+8K zi3i#T(x*(yLy%B)|J9~hcD9DrR*v)b*HMX+_OYR#k*Ly5DcZFk$~K_!stb*s z!v6qrN76PPNzpjtLx@#}`C50}h{h(J8wGv+TDIb7wqO|PFBrq%M z`P$e?!B!bxW#9#O{{VefU)(Y3F9tsVrx;3IP;=Z|nHDE3rdB82bLahOs^M|C@+A;J z$910N?4S{|n4@F2GD9(-KAJd-Hx<%^43;+&&dX-k(QZA-_K8XIPn?cSv1Oe$gaY| z0v5{mPR*1tM`+1m12E-P+=1)EdTsU5Xu_^U85U8VK?rX0dFQv3K;P;>8nwsm zcUnOa;S*>P%dYYt5}7fKj-NF%knCA&UcYUM>faw8lYy~Fth=XZnW1ZuNfmBI zgK{R~;p%8uP&$Rc=%RPQIV<2fwf3*E?(MeSQS})n+rEBD_im#aT^&VZQ7LZ0Mc=*^*zxOTpbbQN4FVskyZP# z{K1UtT`Je6VsJOO7%uK(vF|y#qx=2Xf99g`G&QE%>^(g-p~UTSy2EEDEb;OiguqRU 
zTPp!1hhr;k_b@-oJpt9da1e{LDtj)LO!&dn#jZ9paFI?wq!;>Oh!tx3(?Gi7c2C0r zq{SRyakd}acsW*tvP&a9LYGgts2h?%*nLir$eA#8Z-tE*k_tY_*(|GVadB=IR+-|C zG^(^~xj;v!8q;dmRh7?%>et(eEVEkSBYVSZW|NR@QRU_L4q~Q4AJTvWq1&6rw;Q`I z!hlH$g=^OF`1m8?43}>Vk@9)G0^U!b$5nS`PFs|J+kzPn<*~>z2+)N6yrhq6p+Y4% zA0BE&clznvd)^wPZpPwSRV;;)V{ zVb%Wtb(!tBOf0SMSV@qMIfH@!0467Smh ztLY8BymNc;-$a1Dv&L$V2s;|o)OaIpe3K)*p5K-v2h=rRL$4Yp4p7yH_{#Sah$ihU zmcWA}M`avx5VT#GwKpGz$9+9N<;Eb90^oSBL{%PvJn+3ot^9w9ml@aXy}1e@yU9p- zTu65TOq&}100FGM`B&!8+_p{*!@)v%5s%PPBj%rw-=pHAw8cvccYebIn;7PBR>IYP zRn&n005PW&A6@-5t^PaO&Sku$2&zAMRaXOHXm#CC3`OPLkdGsh_r=7s`^MsM<)x9y zVp)wV=D&`S7C$LvaU0BLTY3sTSW;I0VMg(`)b&4>s&1xn zIwpPKbO@v2EUorGwRW4_ebjMXT*(Ab%IX**L<4 zF||ldf|2AC^cug0Xy5^nI_!{@FDFx1cX0@K!c5I6p>!kyN0C+0?zG)s7}owO0z6LW zUn4AW+jAho%)7=aSdsNKulwr0?$RM!zsV9egP=jz3X)b^h`0irhXpki8&TNMf2NhI z9EJyHQk9+y4VUfKH+bw~dxVVpcULN0kWc()z#mOm8Y2k-x`()Ws1Y3T%=diZxkUtB zO7ljrp=zCkt;A3dLHsm(Dt-0~qQ^yep3jQ2wvI78mu6S{rnL;PARa=u+N6C=H4kt{ zVCrH;V9Iw(Yel*kxq-Wz{!of9aBMuYJ0K>$Yh6N{<|gveE5sQktS3azkWqKG{=-kQ+ie-+2BKDl#ln=XJP zT2f@T%JvS~*e{iP7GEdw6q2&6SZh0iwqe+R!5;)1d>Yq%S;roTxQt_4bt9MFQ2u~8dS%?{t4GzO}1ddakn{{Sc- zBx4!!X(q2RZa@)(>0K@Ed&K&| z9y~z%(L@BpM@IYCgOtu(=JEO5t)cmP`>?EL$FAD3wT{Ku)oWzURtw;~twpDSpn~F2 ziQ)(5I`a4fvgL8&IY&oQJ(IL{D_-R9{Z_!&@Z&e{;kR6GlJv zUX=b--nxLsk*LnSEBw+0zT|2wg=F#$PToj!Jz>}`d!61ZKa$1v8Ex?8JG*28{{U^p zNq9pdTFJbu><8b(2HK#g(4}=T#}bDv{SEoYL;nCoSCl~n?yG0Xy^pB$dt6sq>9Sq3 zVy789&cv+3=4*$d8Oy5%2|`f;G~a6Ob*1h2m*)De4E;Pjm0m_{YV^ZTKEGqUp#PCL#0M^7e6M)4!+K`q&$gY2@$ z3h%(Wlu!?YMf0Z4IhLfN{{YCaXpgx^%W;v&v}-(pqjE%0EfE3yho9r7V;ByI1X*cm z5{WT5*=(gOBuzsc;A3_0X~efbw3Tj%;;IEJ2g^R!cLF zeNPEvaNI*D@`}{$<5gKi2)I@i*3#87Owqw`iS1GbV6i$HZb2*$=j*C+42KlJh#HO5 z#5c<-@P0uE7+r6)RKY zYM;+f&O*5IDx-BVag2Vkl8D_%jcs;9ywN(yrZT`5Ja+_B*FvuEawLt^B@oJbOhvp` za3OHEmn>ymiyB7JinldBpar4)bRlsd_^7h~*XCXJWA_o<#qTNR?ahx8T8CmO<83A? zWSwy(5&SvEi$HfIdhbnqZ@JKHwdJd9wRhxuP-yi`_#=)xG`YCA6$s>QPqw~x^{%RIp8^AQ zRpLaJbb|SVU{VgU0;76?AK>ouciR% ze3UNAjYE#*a^EM~uX`_MmpR_9AD%{WbC#NVQc$FnhA^!`8tejtN^P&N!wNXKp3N-7 zuT;h@^~+wJRMu;?;=5!QGqOjB#bQG>yllnpZ(WF0r|IGd`VYfNQH3m72QFvV!CMP! zxH&$N(LqQ3y4MJjPb6_Oku9Z&C8R##pi@Iyf&RKYxlX_UZ;$oS5;eiOL0o>R@g>(@ z`2PR`nY(E*y_YnnvBAFy2$Tw0lm7q^iB!^_MzEHDAQ_pnjXtQ>OeFmdzOS06$;U2C zlS`#{FWXt$w@mA1+IgA1>-SJztM(9XW@d7r*0iUcQuhoQax%Kj{51R*tFk;a=&Y6! 
z!#$HP-{#Ulgc7JDQ1oy<6IZ_o6k-ZzojkeH_5MProe31EDSb>`ETy>Ig z0Zf2M=zNlS{e$r#Yo#yEkvwYzlhqa!KCYu-?0Ec?{>CBi`!wl9RK?oKaJ;_kKjOs!_^C%C9aV1mPyAKtU7p_Jcb_9A z<*=kTvP$<7ME;z;!$Jx3U0LGhN|^!%p9Rx=Bz9cy@4pjwzl;YnKw5L%L>dEb%t>Su8idB;q^9!r4uGdutH{dw@6ehvaLjk=yB%%%#YVg#ABm z>9ZnZrk?~|ipu0Hrn$Vr*sg07sJuH5WJdkMI|@jN5e)JrYfcJ)jlAd=Q(m5-{{V*H z^+W*psSDoIj?GV&w(Nb93Ht|LnJzGWe&RQW6?%m$?L``wkj=doyQ>e%a9(Rxaw>s$;%2>%EG2@t^k<>WiC*r67|x?4eNa_JV%z9<&-z z_UeSWayAjuVfQJ{M%ihioa=McH_$wptin&piG~ST77zgd z7a@Fc97en;hD^W&>Hh%6G>G!^`Jzhb(H|8@``^j9+>M|1zxp)1>(_Y*lRb^PnQt?* zPW2Zzl>b>rX|ZjcPG_Wa@1P6Q~S5K0x>?`*Qh2D*1D9m&_wI-NVfnHkhqy(nx5$ zoy|*7Y$|phDXZ8BKy9y7sG?IYSBdOlR)zj6{?J>T9>xN?#+POhUD@JSbM-90?5Em+c2( z=VPx*Yy)nBYIk*5{#~^!G5H0Kij6I}2BeT^RGNMu>tpI7L1Z2Rk~qr z_S6!oSwTTp4PT)Awx+biM21;8Y?}DIcL+R-j=wB3M)M{{Y8T7F==d z5gkzZ60~4~TaKrD{{T|v?;Pm?^#U1OP}aK&5;R#%34q(LFaOZw&CUIw4G2n~bN*~} z-168F^&U0O1$aaamj3`>&SX07e#fz4Sq0^b4{c0(=jNo5+2=g0CYh8 zyMZUwHNGZN7*_2fFhY#p?Y$Y*2QWjZJLs@EEe1(^XE02QW-KzHSN`cR|>H~37O4UHy@j9;VSy$19B?GDIp2dVrh;(z; zuzhFZAC#`~nJjgk*SR!zJ#RNTNhxh5I3lYS5y%0l1XLdzYMVbNC%pFP*#clo#KU9p z@lQs`*B~23d3$|~{j0AX`z z%`P$ozmzu{%d4?Dps62fDk6{n08LipV!~vI$gofnj_Ju~+WRz?PjQpVS;q{#c`dtg zQLrQiVts!N3X#WWkxSb+%Rr#7`{okYoOiv4f&D2GH@ek3o!6B$b4)In`cMpAMB?sq z9ftba%dT~-hFx+pH|;O4IYR@51eWJ51s0prN0)B8Tb5GyA~Yd5vA+!bla_RkHQMaD zy^F>V?QqZ;?g&51(#TOk1Lz3Uq1hr&t5wf_G1)h|zxeg>&M4c@e~-EC*Ko0ae>pk0 zLjHMs1B5$%D0QI1?G?m=070_n**F@`3}wIVarUs<+ec4s+TB(dz#f4kRZ;$$YU~~a z$S4aJm?xEO`Amx+y2FV+K=st%OklWJ?7qH&DI?}z=x9JBSLvx)U%3_0Aru{Pg0pmz z;!8z&V}4Pm>E-_b?e*X5r3MI$N|(42ueE%8jLt`PE-Zx5-9|*v$^?K{VgjA1LVW2- z(o++Jr$)C~aYez&;LTYVedM+k+y4N!!SvIZ-Ozm!E>qOB zhnlYVJ6@``RMiwQydm`48qYkgR`C+Vo&O?OBc6Ugr0PxQ)mP8|y*c5mm)tI;M} z_cN2d`4<~^ExnEKk~uL(&Bz=eg+N%rrAZZHeQB$DyC?=A4mFX#Uz%1~P>5hPORmZC zf5<)G-HS7tlHv(Mi2|`wKpbi0`f2(*Wf;Eci%!2~frm8DPC zx@uMc%$pePCL?rXV~vt)S7e1{ioQ=F#8+?!ij5;}zNKYeF&Y(zClpPWFu3OE{a|lb64`gew^wM*hA}@z#GMLR_t#4# zqJ{YiFKK%7m?NQ<=WkYRl#$|szK2Y9y?wx{`?PpYvdS+JY_Z-#;xRap3F4-OcLZ_+ zMan1JO!o2*MAYr}5w}XXSdy|kKsgQCq1Rx4BdTmGBr;+jLapygG;-g7nJG%FD`Awb%-srMNipS#R_I@$zR*u#S19_5zQt^c$V-ssMM!vk&OC}^efB% z(c=Bp+FUy%U(vaCSit11B%aRALVQF4UmNS^9oFu7 zQX_DuZE*}niX;56_*bvtakwt&)oqum_M6Ro$Y^P-KarcP8pj;XtqmQsDUeNzyYkFrm$c1v%Ov6xXJt65v! 
z@&k1uRtLu1b{>OM#efjxcPT3*OxUYGXLgf_0F&L3Q;V>v-^Y{m)uls_3>2mSJ0fUj zcu`RZJN@5}PXpC-FvP{(30W|vg z(-6sz6QcJ9K>dpFSXoThmjcQO6#Gu(7nYllh$;L@*lMQx=3;bU`!2*pAUrQbn>mZf zW-{tzvdDz~-Df7uA`SllI1xir{J(~kr?%Kfx@xwXQEHZy1{9?^> zZzC4$SUBV6=p6mmP=);rr}gpw0PfYU1<>Wf^QS_euJd4FxLk_QcFxtU<3Mnb+D%jkTi(me2`~hDQ|Y!i?-u+adUOYVeY#)+w&x8_JmG=XqN&~ z+^p7?Mk%A_DD|lyN`5+pmTSwZJ#=y)Hi-8dieG}%~U;>yVKLX*;N!12aQD3E(BKqQ_Z9$pXAPeapDB_d(U{S|=031fvK4kTFR zK^b4^(5lgf-bVV%WdS+spj!GPAp|}0O`E;GD)2A1v0M?%n$;S$sTHQ#EGF1u^79g08SkjP_P14Udo8k~cNDw%grLE~oV%uc4;0c~&%^ zqS98;Q&vn&mjj7aji?C}1I0(Lis@@ZkjPOa61QSUF-DS9$Vcimtw+~h00QV_=!AQg zINo8hcIya~$qT$_qkc-HkM$!~UAAH~2hCA;C<)alp9N2CFKyN#GROl*O2QR|18hn2 zy>+>^E_B(K%jX6jZl{vu@V74N;TEKCN*^TGLNH_=1#SNTca%x|zq54?VB%}YSJ(aZ z?_e{s_X5BF(&nZ=;k#SZQGFNfX2{GI&DMYUYI#?f-+I$;!&JE)@i@cZN3z+ zw7!NHX-sTnivH$FKw-yqF>=^f!O$h%=QubCn(^yiF1{7(*AvNaYSJX?tH3*gIc_;I zuk#VEL%n2mWhvvj8}u6- zbkiB;xVdN@*zjRa3F}ax3f7u_9mkt!rwU-py#D~#J!pV=;KkoZ%o56!Xn+&~gjm znNl#M`d3I~$J@pxBUvZAa~W!+{xr0+TX?0ilh|k_FFcDwEAn6gFHZux`KD~8DUmIe zu1<-r%!JtV~LjD zdZpz!lcz+h$#%P|p6}YOgo$q#F%{YN%xG>Pf;5pwA11XGK23GEq9z!T52*_`bU9c` zzF**GPE5a4?bd(ruy-%vSvDQkKVgca$=`y@3xJFlg0H?oE?#^w`<2ELlW|PsnE;qoro2uj~cs=cK|l% zUfEjry`u+YWt#T>3kf+XZ++_Hk=SucoeCK>7d=;a75z(#OYpPWO(o$n1{@QDgZ}{T z3t#lo_S!exF0mV}42DPcX)y_Iw-J#A$E&34736s= z&k$sWHmngA^^=JsZviS=x*RQ#J_UP;DkI~YEZ54pFZ+|RhmWkqC>HU#;&M)#fD~+y zp#&OiPTyTv;?XOm>eUKimBn~t5F}w06Np6!0HT5Et&C}e>*7-Gls(%43CB$x0j#KC zbNWZ$LL12-SI4fhz1IRU0$Z`sGml}3ToV5PH9yCQ{yprI_nxtj{l8hyT(QX6MyG|P z(;B%-Qi&lvhtj&U>|M;+`c`U3im@zWs1rs6@gfQiRoK^BGB}0mU`&Ua$s;Uxu|n#o zsT~;?en;~a)ByC7Rk#4mMKEykoO;?tI&p^S9!Oe)+!Pi3=~{29yU)9~MXSRC8hEV( zEpvGpWw`dzMPz9y%B;u-r=PoRdusGah!c`u#Tj9Qw%6GumX`uJ%yvs&D#VqIK;LgQ zKZcRX0#F2Qv=~ngV2v3LSzTJEXUN{oY%buA?8eY?1HmLUMMXZ{>8EUo6amzLv-naH z%AC3c^uR5(qzmr{9_zg3f~u`32kj@)zCR6Sa><;4TI^NHgRQSbDOk?=dDI4VGxF?jwN)B)cdWlVBQGdUNQ$n&J@qGG)K2k zNpse6u)4Bv$H)pVf_M{O;rQyOYlMdG6V{GUAne6ef{gMaMp+q@@-+LX2_NWnw&J;Opr)*G5o@E9JSBwXy@nd&gsP<~S(vg#xM>&1*^#zSaEp)U2F5QsN|{ zX2ax6(ZkTxI3L$75nAqYLHG#7&rlQ_4Bm~jToIUX}8EmCTxn+nj^ zPgHpBfn-8l<*W!+iYP;*5G*lVJ5k;wYK~wNUHqRqv4wyQkmIJyv>)R(GFwlJx-z6H zw#$gL;N;b$=B2B|3IRju=T!CwnebJU9}lXnIIQ(uvvA@KeDH&I;4nrZ#Ro|^gn$tS*)R!7-E2EHibl>^4U zi-klp-UmSqheVgWM#)Qo`EM6z0%I;s&7@cC>yegpsYql8oJz`S2`0SjUte8V+1sVW z0jL@!X795urGM?Y*{yOoPm%F`|@xacUNkhT9E2X(9!-WTKg|Q6ky!0MUUYoyha4#vH^Y5w6R~OFJyW9JEBy z!aFN~E57vBYaq*&k(S{x9*dfvCHEUXwe#dxcW*_57?8Y1p=)yDj#+(204b#7RIJDJ z^hE>IQyBb-dzrnld>(GrYiW?$Na4aNl1Zf))}o$iL!{^i5%nUoscGM{T)a0oorW7* zOJxxV;&o{qH`oGc=gIZdC6gCsKPApAv)cTL)9_NuJlRtB_UNV&aN&WG2nD^k#Yj7y z#)}DVVr$Ob+AeZ8GOK_1YZoaLk~ZW@!ll2{(?_D2NHl)c$x1s!I@tKT19kGH z{f+iV85TREW8{7|sAz8_8y)FZ6&sQKH2AJOgnFpI5vu716CGz^kAl+D2;hP=2^vD7 z5m5yZReS&j)Yiib8W4yBuT+KBYKXvMp+N(H@#&|{d~`r1_F5TRAlA!{F8kg{p<($jRhgYS$Dzn+Cp=d7s*8sR5rD$a?S zdROL8Gme`Plf1CKjia9*aU>Tic3{XzBA=Mn?z=^>5*rl!10RBpf;YLyOyPd?QW}&3 zfOyxfml*t0xRUp7G93}-C57euvP`T+y`Q%l59OexJ$%zuc>{%dZtG*)};m8sU&!Ll^*ZFe6Cy z6xyXms-`%+O3^uhk^cZ{U*HIl@3J$nwNHLYZe;_LDzPAhTJFZCns*?yV;-zO1)IPE zCQ@z5PdiMEZD(uum4ZTz6b6e+b*Hv`*OfsU&s|=nYO0vzJ_uXCyn=jv!njzu1Zr26 z10TAi{{XhEJ5cnO>Z?1UK7~DE%XeT_IX=*lydGE*qs?jPbe#PYWbPmSgll0MzB*-g z3>p_#3}_s((Czr?3s_7ym*Sb1rX~CpUbyyAqu{L^(K9UV=U@k?k-pm5ROQ`d52s{j z)-o24l5-3##^D`#mZ3tc`nDT@chWh~JQ-Zz+zgYW4St5vQW-tVsi6xrgl=6Z3xzv= zbk=U_NILAVpdJMiK)}iGb3WyGgf;t*l4-ZoOV2{XWjdkhbVn1%${H;@7y}L};kes|Z_uN>o&( zX zCE~nb!6&mUW+=fRsQ`uLzn{xP6_#G$SO3&VT^Elfb3eZ{cBCi$0xV4%QL0~X7!z6_ zz-_OdEq?+Oird>wboVf`%!R#(qNzvsf)h{_?N3cX<>8{^sC%Dk+S%i~l364LLjzu5 z_#2v$^snKf4HpWFTO0H;$|PuQ!mT6bgo+Qif&e3LJLsa_P)?yLWrrsf#L&qan69P$ 
zy_Yo~adHSy!^z)oU0R=`k^=8H#aMXoeNzjEnOP*V$J<1a72p&_Zi!FeROt$#${f5( zu9oF*e8teme)!)jg|=1x<7pCK;m?vTZK~-Sn)#3xO8z>H)i@yRiA=bcRrlim0FSTw ze^k8BcDz`Jvt}**+nX@l+TOw^0^Zk^09GHV=s!{XHBTC~A+$%n?nEw9{{WR)Ij;Ky zLCtwx&+Z#_s8hLiBv*ZcjrFYT5onnxL>E;503j0;ie*vGLrDS(evwKV>rGVLvS!$_ z>yQ4{cQM<^@-?LP>L$6mv?5Kp29DgC06hwlb)4>`e(th=#HDWfS7|LnP*f_1LKuz3 z58I+kmra={rR+GP!O5yry)$3}Pvh zv|QN0imNGP4aJKFJT@TluGQYUL*1E^aUWu3q6UeriMsgK#~aHj+A?{^hEm2mMIr_) zzFoF@s5|XJ=r-1XuswRhA;CRC5C zq0Yt{C|u9X@ZG9GX>@?mVHOq)lDMd)0*#09{{UTBLj&8^Np4b_mg*fE=epgdU$IRc zt{q5@EOWUtJ2(7XX}xwNepS)M(1>X0ryoeybs$O$4)TcBG(86AfN!x{=6UUS~#ac#szlx`JO7Te}tAvx1 zvNzBM9-6ebRcG6y!epbIJkiBxI{^zXC-> zl^2HJs7OJk;QkuVr^Ix-jwg2{Ob~(_yxdwqZ4G1P&3^ zSIp&@gCmrTvma*Zh+;=CxK^Zk@3ytHSSMTv@L3x`iJVVDw6IqO%4x2nm_;g}jZuf( z!PqaK%U;Q>&JLLE_6SUq)i&*K<+r(2jRQ;UBB?bj6z+fD)|#QZpm$nmykZZEGqcIH z+)mO~2~1%engd_>tLaTwN*qp`Cn=9vDeP`V=2 z_(UgRt0OnZnLC)4K(Tjo5o+G`jFK|RJobtwdVQuqC`cZauZ?#b*Hj0Bsrvy9?P4HVhe z=eOkSzx~bLvNDMT#v5;KmNg8iB!tzCJRT}Pj{1sgan4FvfB4<$BTiYxRxocP?lLGjE845K0J~rJl9t?cIpW-(RA~U z$*mRV$ne?^-do9y46#ZhSQpDoQr#W&{9aeMM;!g5 zv|i+`WMZomOwhy}RPit;6;ZVZA*3^MBT;C8i3oVBSJe8Qwmt=q5s$LBl2zwC@h{%U zi$F^q>rKej1=mW$s+)-HtZ&A6?5&ix%RG@?TibGbUJ)2vF>ywA=+P{Xgxp3r3 zl47HxTPex)E5wc3H7EX_>+E1nH~WA-bf0R!+syX*5UuFgE+*Q+ zB$8D~uk(7;x1rXu)4;m8!-L7uOQ*82M69aN+q9D~Kcv!>B>MX4AudGtEi6NS{8T0x zeYFNrLh>uC&{e;Q^xJ)Q;#otr%ZPHBH@P_=!Ci~1Xr~A#NG6V?v&lsS{tI6ks}w!K zTHk*hc@l&=oul6^viotfij@EwP=?sk%_*m|XYB<^+u_SntOr)IwlT8%i*n-4!Rx`?Hw3d=^Bs7yhhEw*H1PcA6YOd(GT6!n$MkwY9Y_0De4|AH_ zfGyFJdNA~_kzeDaX9-MrFivxbkdKK(nDFtV!l8iWNdZtn0ST}1(l#BCaxLYc=%m&V zKq_|F3ZDRtKu?KvSt8PMlD{olB(_#J2tv;V_w^)m`l66<^z;<1HG9~l#m9=t{{SJV zdvKfTf$Rk77qs>QC`0pE!oN083qlVa{S7Otdp@O^0m43c5JL$zZ5&M*aG*ao`%7;p z>ZqpLNOe)*zyH>cCQE!Imp`?MZr(+U_X|kF5wh}LL8yNbua{UM&1!wq;-eKVvZ-@@ zV{W!q!i#kGATpYMu!QeYewERA@X;09Rj(b5r6T>`?kOt;VI4_!K0s4n6yH+;2w3U^ zIRv(GTtjIjwiZ_8vWEh!PmV9WYBd^z*vN}Nz?i!{&(3`W+gmwYeZBqU(xcRSkw?Kx z0DjN9P0c8D?m?Vr0QjXYU9-pH!b1xJ!b?H|k0*lBL;F7IkFim{|LPzHJnt4pbD!xEn3`(ao$ zK&@y%{69T6jW>(=B1v9P{kC$p_P;x3@oq@|_1Br>BhjFhS+_rfZ=}1}c-DRiQ@ZaK z0HmvS%e;J-i5ml5tNx>1?NJ6Zbzg4ir-U#=FE7c%Rn4L} zNe!IA9QrX*1y{$Vbf#5}jV0K9L4IgI$b7`}e6QIX-B95zpo}mD4G|KCqtI{T&V|{l z{{ZX9z?Ezd68;D`;{J|17Df))m#{G5Y-Z!vEm@1L#nQ0`Z>dBh&y63r=KFE>tKX{W zpD$Qey2q?be(e`0_pYWdad^x^usbzL*ixrixk^!vi;<9bDRI+K|E)i z_F}4%h^a>-dYV_j+-aO#xKGn)(H>C~4mu{U#hu~|)ru;}EiSBG*CSxWmK>C92a*Uo zX?)ys2|wjKd=LV=T_ZD!FJ~MQlD7(Enq%Nl4UK#eLHX*cabmqCVftOo)tPIlW)~LT z`HI%u5l!j955s*ogJhvFKuX;QRvyk)k(cZ&LRF8A&!+m>(IW1A5=OPWY9x&S0H4&q zO)(oe7hK}XWfqLeKmZEYP1m{{w?o~I6=>oMA3xnj%FL(BCgcAA4OHjB@{*%d{{Ss*4ZiF|yh6Ujn-IUZ1>%B9r$Hpwmd>GAmS73li2D62rJ^|(@K^DZ zMER_iHgctmQy1j%OYE#|LbM~8r9P&$^wei>>K}qOHBDIfsczDe*<)W60(+}Vmyju^ z859yKcQiT<5anQ(f^WHLM5E%gx<$+=%%hSxwkc{s;#sO_4)pvrKIH;+Ro$}20C^|w z3e97&S*{!QlFC#pwEGds3B-l53rzdsbzN~b^ic;>8hKi z=S8Q-AEu^l?H**exQ>KdIQG;6Xjs?FSHZ9F)D4UX@kNVHK%Rve;*Dg1xs~8qrAH=? z2>?)O3x#h4Z}jE}H*6ch(k;O?Q-fB)Co$lOf!e$>!g*;p?qjosp_5)_j_N9m=0 zy7|q4TUTlp#^v#|NE$q~y|hX{=_QcVJ0YVeL0SqOL)u3<4i;O?gfYB!$#i3u*z$X& zZfZGHb50q=8AGv4X zc_g^QVj;IxYH%%ia!ax|r37BSbX@x<0T5EwuEF8fGjD2<^D#*l#jDA`I_lpRMAiW6 zt7fv^x3bR^(wJ?cPE6qLK{P%+D@{~ebd1an!A?6aMgIVseZ;ml?kKLBV-yocl9N>q z=k5b~5-0|n?SqxUaEDYA9ByzOV;W?YqLuBCy30mMTa{U_4oQ$R?y zGbmb*HBEui{px1SZeyf^jj+*vhI%E>?yWCps~%(*D-b6emkpc)4CJqKV=oA zyjXl)fR5J2$x@39F$x;g*TD7lI;Q=|fgt^7Qn@mnwtpMR3=vv9(6mzW7I3;!j?MQD zV`D+Smz|HNiWrk2Uf1#ErN%0GBe-z!NX9mk?e^&Wcmkreuc`B;#oTo8PE~Vh;;9dG z>K&7!_FLY?g-K9c5&O(}AFX2rq@^ji8-w-RT3lSHT@P+s^i#gBP;jhQ#y=usvzY5! 
z%L~+Nh?ISYV9qvla4W^f(^Yv`zSM~EONDoSJMMz^1wXu4r?g_mASl!u095a>)qJPz zu=8A;94mi;ziFYjlbc9ojcN(4PoMntv%#MzgXF9197;1p5>FhU@qYztx&9h@y8T>~ z6st(8Ti4QuRuMa)$ZDf6SW;s3WmV@Gpdfl0Q}ESg)^G(*w6Dn7B>0Q#XXLym5`kKS zLry=(SM6^ zhv36j-Jc}eR|C94zrwMLDvgyPjT`zX>XJuOQ%*Yy)9b4$mGQGcvq5n;?BWYWSngIs z7(e=yJCW;3>xLhyehIp4lWDTY*v;IQZ*g-YvtDu$&cd9iKH#8k06Ttq_84NPoJpj* zkm@m7bCw&}YEsorxhG;)gp*IlLCxj)P`iH)A^ZZe_)=u#}Nf&AA{q z)}Fe^TaIF0YgJX8!zdopZ|&l4ZrP?tCFY!&MP0Wq_WlD-VNcc&axC;Aj-Ii{ZN(-i zsP*P4Q?+?Fpxefn?g*W}39DuyiZ?N{JVh3&{{V9|rArb$0VLN{(=1ic-6cLkm2EQ^ zxv}+)q=INBomcE?FdTzA8WJc57$U^fTNHD6#>Shf=uL|C@FYkPt3{{Tf< zsuT`N0-69TA03;$@Z4>tA+j5H#Kh$l34N?fM#OX_ereGt)^4$|nI!|vS z2RxWma@bUjfc!LpgA|po(_|HwRi4H~krmch{iRxM#m5pqjeP0+X>pJ#d+X;^hlo|{ z1uNN0CApND;SX(d10C4J-)jOqQiEE{tL0cL<>nkLnDb6WJZvSsmV1ScL{V*Wc@<{h zOE6uCP*;s8f74X|0Q{U(m;cZTzr3={W8s~$Src~}w0E(gq)IswN~u0*E4u3e%4*uW z)J3mhhR$ZiONL9Ec8yvLl$PnuK&bsh5B2rYNSS~{hB}FE&tY#PIFho*AG;hr!7-~; zsV&6N>~t|%DT?68dj9C=@$GSMFD7}4UPs5=#-wBZArvG)UtJ%}=2E>gW*@7ul6h z<|tR>yUCNR=I(KltXI(5@P=tWLbE#YB-i+++6XO8WjDH0@SPs*0NUGU7ZZ z&;I}#+Hhu2*06aN6|O%BbM=-a8^M~=qZ#T6xa)A>j$ z!D=c``f9JXUHg1-=BusU8q0K_*>9$pk8j#ED@uwFJAE~IS?5VUAp%Khn7w!CnP|9_ z5y3H#{{TPp5nA{=it2~BN^ruJaoyCa4p$pGy|d)LLirabd&)`V&aITjgo5zG8m&#o z9!8&z+E*;g+NvYLSl1Q`;dB(o8!k}5vqAg7CYCx-vv^|0l~fy(>U5!Fzd1BEEbib4FB(xAv0IJzG;gQ3XJ+nj-4qfa4e? zLH_`Kc2&45hz}``j##c*DLs~UGfBkL@7jce`D-N4MKam6Zg=eF!kJpE3RvSetju;a}6FS`JxwYhfS)RdYZw_*Sb z`JGE2bbjdut)HUY>Jvn6s^Cc?$L+=%yP8+i>!Ic#VD0i&8yzHZJ&!(V!rev6&>C<% z0CyDi)?VFWw6$oWyH(Va;PO{(AolDKJZeGkHR>!RX+?t~Px;2~NQ!QI0W zMGPzLi@pg#O_;4cDg{8(xmB~eo(a)2V!4{#mc|wLnpqN3z*mDI+m%HrLOg1eSI^X+ z_X=Ki`->;YVeX`kD2bNpE2vk*VpxDG7QQM=ej2##oc+1S%~tnO&T#l3IPK#_mTQZZ zDj11iRr{#hDFoJ=)6T6YH-e?Az7io7V)60M3eK(VM6#qV%E5_>P&Wms#BEv~A$8mn zw84>Bv`l@=$893X8v`UV*MD!95Ttt0dYvck^H#Bo#L3ZSIw4reD1;at^m;SW3qduK=Tfso4EV{OP6INm~>J;-QUDprTL!eMts{ep)qTgdw^`OO1gu z*Gc8gl#&wQ#wq1$^7QQSD_<%^HCR_U4VHk#CK#jwsA-c`y_t zDH{;G?YYv1TJlMLRsYdXCH>|6d3&i)YcZzx~A5Ha_rRk00C$p!<(D1TOp6dU|Mpdzzh5mqtIrez`2a7QRJVM*Y?CnWb=9 zS)&1!)SbyI#BZv+ux-f=paCb7s^~s0aMmym6bCDYe=_7_?cX`REkKhW*6ntWZZ9DmKs){i zTC8dNIGnsfV-77RS+Y-OXB;^5J3!zn39aE~3RF}C27~e6NLA{2Cy&&Ym$2AfCEMO# zxs-Rcb->KAL{AKy)cdiWliOW*?V&Bv#C3{g!scP{%+dWsor|Qh!&j zp3R8;jimnofl!r>7wIdjy+OYq_5uoQ0BK1PJ0ycOY8#1oFhbLr+@`bbe=>As)qjn^@saagieayb*qbEXLBut zFvpLGDstjD7N^|Bhn>No8eS8OdTN6uDQRx&l(3!h%`J<`Bb%9i;R3MSDv#1DKsWFP zrx%oip~)Hx3m@4n-Tm+GtgMNYnG^((y&QNuR+akdcEoF{^hT<@cl@S4+a2u5oT^Ue zj7aLiSdvu&H0@me=k6=7+`v&lMcg0b z>8%bHAt#iPi42y%5%$ZgT)tlukCa_SY=!T!vB_4FBp?}KU}#<{@o%co>eIPm89+$% zL5T~ZzYlkWg6^Wg{1hY-Q z1}wkIYrO?F)rAva$r)~)Hg}OQL(FbL3ViM4>enA8A%RdZ3EoQa{sF*;t0l1i|Qw8ITeWg1o}J^7yZ_?8?ZznbI)v z$G_S@Bzk@tjyZiwmvs!m5_DKzOoAb=2@!xK{b-<*>Eu&OcMBLMaCQmg`JrK9HLF#n zjH>&z4apTswOjbpS^DYs7J$#x5VvZ`7VDSR!lT}jINXD=;?)5DA3>(DEB$pm^UjS{ zo)9Vi+i_^3OE$0WNg%T_d2&A^{4`9BIr8d*U7UTs36huh5-U#ym%Yk92}8Fn#ZLa5 zYpQQ;x6xg|@*1Fy*I>B}TsPu2wjR`XHxYqS%~KOo#RX2Rux=5!!PS0ce8Bjn4a;2% z$*!u>CAf|!R~tGFl%G#cXh#EBS=oug;u9wi1?Eb|2%&i6idhj#F(8&sTtOgsD%2I# zE%SwPFS>dIO_L66F_tTdB9Wc#Vix68e@#(9=TS|@^!)W$GCe_C(B9b|OYCiIAczHi z6S?-DN~z*}Y)RUKUAjKxkbTn~7SFzu$X*#avJOrbv<+RUs2Fh3LMJJ|bzY(iG&xRfL3W!|*!OVcI7_<*-;E28*{{Z}9`1lf6$ut!I0DTGL%gIxIRsYfm&EfB{HaATS*0!b#S6e$~bz>(l z_)3b`iS*VHKUqK)cTpQTf4f?Ksib9e6%^K8k8-K2roXIJO76}*I?B4bZh$fSZ$b?xK0i-|xTXHuJb{=(Qjducz z1>3*zhpo+k_?HJ3szi(+Ztaj71tt4#H22)ezfHd*2rFCbIFzyhfm3>}S z#{7%YuP0WHD}42`2SxVDP&S8mE3vspr)-anII5UVu9~k>_^%phUfPm zx3-etMNUras?n_g6rd+<12P8MrZ>^oq4h3fd_8*Y+5Sk$TwjYDtG>;6j-g0X8$lR6 zkD7hd0D9?ct*ie4@lFwJdv+LhRXO@AS!R1S!)JGk(8 zgqbXanD>u_@k+mcnyKV`8v1GzEEgM+Q~ZbWrZ25xCVYhPtGcX~(FY8wNT43wYRn1L 
zG3u4lV3>?pE@*s{{y?0Zw;~zR-N7_?hdE@cZioiTK=m69YOy;v4P?0@LaezArhhvX z%!2YPMn2?^b{}*t%fd{WyA;C>|~d(nT;9YQpqD45w#c-^dn2=&^^C2S`Mp( zU;Bo1De)bz0rDlTIKu`~);w-psPi;2lMA;RuiYI+$J16n#xgYa{1ulzA;RZ9lV zU$gF4LMkW*=l=i=X|1nSV~LPUDJzK#l30Qakw~PtsUOWk27ypuRfV?moJYYiI(|kR zMg6_dnNL1RaSKwOX+<8tTKa0UHotlV6>*M46Bg?BDPy)VnB)^9&i4&ZxtsvWR1~6- zQL36CW1_r_#D;~?_jS02ECG(0PfWMe$%FK0XLv`OF*N6TKIeDV#b(Q zh>Uv@l_!fX$MC7Hr+s|pi;pM>Y7t&O;$q7za~J`TL&ic%sAKx*4QalsuY?BS)5gWU zJ=NrptZx98*48;;R3rL|%R-EH`igI_RTznKZPYA0q6$xDC9UZL?O73{ka86zXh>i_ zjC|`wOK}dV@)}lh2U3pu=2AFuR~9)2SS~UUr|hp=Vx<_MWCLm_1AS9>8o5oz!5*O_ z)f^+=v7RfpllH3=GRri4l_geAN$&A9KXO!QAZsPMm1SKdYRl2d7X7ihlr`Tkw%wF1 z0ALglexRKeHENTP)TnuG1)8blBoX_BQ4IwFB&qxdr`J=L>S%f(x3#xbvul-vu#C4R zZ@NhUCY$ZOJm^x#04Bx6jZ(+tS?+DFaMy5-Mk|1vhn5euEBNZ?v;77oWWSVF(1gOo z*~s1)_}HjeEcv~mBY|d&otwth4WA|-bt zaHL`clu!jy06T9{zN)F8?SYJk=2hl?=AlFPF86x{&O!?bLgR{{UT22Od{n7!$ci zj6jR3Bk<&xRvmf^;IV%C@qckVRaALecWF&okF7`W)t7ddo!ojvB1b@+d1>Liyf*XF z#7K!f_qQs2?7M|k01_+ukBwDEqqqyCuu2~Je~ydyO=oI_?GP`#e(pTM2aPLME#My- zv+ebnI*eppBClnR(=mt3)jrFccL7f%uDWuid#F6dStC=%FMg@0=4m`tC)l&f7*WpCOpFB*Tyf3yX}7@!R<*E zozG-eRseET{j4mkjkyHVaZOg=C#IkO02N|Yv$mQnZH2AGXZINiF654Dr(P;iHsBL_ z1s=P9zM}j>13601mp(zg82GW>dcs_uPWDk1wkF*fnrC6K3-klYBmwmrm0;LdmI$z^ zwLqOC{8`O+%caVEwV!C({{V;`M-^}>$By5*m{fo~5VRkkomodC+dL_V^C}a#jT2t? z;NG3o?PA6C?zfDxwZ{rhQYhp#3qt(mR(B-%snR#(*d+YqR7IqqYmbDw-w|~^g}u$L zMzO3u(mS&=R0kBTYoW(3!LKqGjiuNsq%;ck{4?33!ewK1;rnuAj(B$aV{^oh%nu-G7+MH$HQ4-;_8k?|vCarG zCn%DjM2Hz9Qh%tWLjAN9_0Vk`BqR{yvatC4&Aird`*#JzQ3e(DYI@Vb8nQP;Ry9x~ z)UsYP-r`Nfbc%WzK=tGZ2mQ6$$Gu-Oy74-w9L7rdXt1qo#fU9RkKwAT2v(+k zmRUczOA^YmT%_=R=0(UPl@c+K@rtBS{yTWrqg#~|Vq0FXC=fIvms2;o@tU%cK*F^0 z!+lx6^zc>p);^)S(zv;j+2f8$WDvl_LWj=+PQ%a|`O`JjBp<0lzRzd^bO@&r*yBW$ zHT>$uLYM;*Bve8dl`G(#g*1*vAbo%-ynf|a-bGJOl(D=JUBz&*&1|Y37B#Am)427i z@t{{xi-vV8Ef$_Y=(hg=xh>zgA!e(RsUdD4l_IP6cpjrdHL*kt%(o+8BMez&b>qfR zX}H>y)MW0QoMJ!^(FXVR6JTdMG?wLV8Og0EYCneFH1BS4VxaHziD7}^stiWF_T*() z{{VVpqsd6bbO@)FE4ZN>56f1^>I>(hLxDXP|J0xzv*pah`2~Zk-py-iD^C>JCA4YJ z2%LcoUgEJUQC?q`sVKsXVAg%=*SFvcLxCim%2;J{xf_huRj)EP*DDiF(&L$uMH?#d zET92Ttu)1X5w1KUVgl09fhg`|fALzD_#4%hR=CDPY7v!;k8d1fbKr`dzmB^siq77o z#aOcU2%JCx6Gat-Q56<(Oslkvk)k0K-9cJxbfl*ucV<>z&3sQNhCkMc!|~_M{{SHT zdo`?B4D7eK%QpUwF7kG7WDWRR&$}q}qg8(arLM-V!15ZZT#my{&ngLtR)Wv`o55W0 zn>l$EmEHJxa#pjW#K`1^AV>$c;F^F`kFKk%?Q-IMvZyj~vM4-f7XzqO)BKb99}C^E zxW2#FuG-;p`4~mLv~5xlSKS0FHr}A=?%9JpYb9*MFwmuAe=)_8CN{-!7^?{*NNwI5 zcw$?j5+OtY1w{_rc@a(NT~*sJa?(iAICcs)&dw=vvfoKG_fjb`y4*AVuRK^UEr1`9 z)Qx~ehKq!<4*3fb%^T+Ck}D{!5GvyuP{KtZ0>|zE5a1Q6@HBb{{Z^JaOzdc#OI^Tb2pQ{j1nME-8#@KNQJ5uc@VBZrqw>WT!SFq z2peit+}H?k(%NKX#Jjo7reaTL+X z%13ha5tSTCU_AQneRN8sB2i$une)aLD`On+$0Au+uLCe{MING@WCYQHp&M1X6)GPT;A#?PZ$3HNt*(Iyum)!S{=lzMydRCESf)WWQ?>2svMx}pxrOB zOLd5e?i+-v9b;la8!aS5UA~mnZKQyPLn|xMvqt+FX~IPiA}Yv1qdNe35Ir^nQ3T4k z$ISjKk=&-HGUUq~{GC=9WsRe+jxqztRQ*WR<}uAfaOXPAyyBVEN*dE5#}J|Zf)SV|?fUhd>6Ew^Fy6#KQYrk!NUxfmdR@mJek#JA?+W9(jn4d4^FPP9tE{JG==lo^ z8xUrjE(+WvQ77{cW!R8w=9*mlHk}9x&Yt%Hyn#q7-oz3oSeokMH4aO0iu_6yy~}g@~&7pr+etzh)jGArA3r<)PHAs4g$Bt_xq; z-7H-90vBdr5Bqu4B^X3#R5CNNZvs3@bK-J0PXewVD|wE*wp1`#M+Kl?uyGuqF+|R3 z^d$azrnG8A^&TlvJEApkvwr^o6`Z_mS8Rq1W@={kta~k_2H6@$*;3*TDMC+>!}2;6 zqS_!0CF9g}WX3*imB@7nCp9#^ZzV1#KT9c+K0&HkCKdZ+jfHtB{{UY@s|0#msK6d`7Ie0O1MPo5dF2ppZ3;782CnEj4ej){_28P z_<8i(Oim-+kM+@Gwf0}Hg8$X#7h$l7Adjby*obWH*x}sB{8xi} znaSRZI~yxpQb|CKdokvUN7`9X9w%HC!m-Ku5xQ!-@^T0iv|@Zwd{|iV&_^}yN<5v& z`|dAa~`cMGZenMxrTg06I09_}M!3^X?B#Cwv6$eUWWCqjCPO8W{E?{(y!EKH0zbeGD+gnF@ zpO!Tf$^AkVDgrf5=7xvstp-(7G*DY;0rN!#oz9z!0n$70tgLy)e1$B3@LorOWs4K~{W7Aw&J 
zYByVO{KwS}GB}!c#oA<9Bt&8zt}0L79I0g{pzq`9piZ*PLo)FM>ZL7vTn%emND>LJ zC1SRwQ!Jr){j5zJ$o284K!8~ab(5;ze)-p7wzss%T|6>cuQ~5yKI5!(<~jK1Ry03N zF)Dx?sDMvKQJGHdfQ}D)87?h1Hw7^r6>0f%A%VzZ zF4dsncF}URi1A@W+%p0Ok@|H*;)KPN;}&~WcPx@ zEwEtqAo6trnr{+vnR0YdvJ&u95km|TMeNS2CA-s)Ey}R_`G4iBNro)wLuCQkEv3xe zPT1C4iEK&-w{R#`$KIy9(CV`36Cwtv!(;;|RuCfIN|D+QYOq_-u;(G-6}?3bDP2aB zE+xX`)W5vgghJGkHd-Twt1T!Ee*kssCc$_GITlJsw~^71jw6#1_-{&6M5xGz14Ysq zL0;r!c%*;i0*tgEkIJ5P8Cu6sxP$>|WherqTNOKk_3=aaX=6;qoL7v1QB4Gj(llhl zAS4h^Ak^_s;iJX|Cs4XnHgX%hmNw?r;sm(&ki#v#IY%UHD9A%sVht#C#jugpnU|q9 z*>czXf%wXPjeRD65o_%AYx>5P$fT{T=Cz1<0Z0{J^wZdvM$O4rI5xq!%TTKD$hje> zl*Xo;s3cJM*idT9%3?fKwL$};#+g;Nda}mK2Q_9DV{XP2HZ#FH3N)WRp}VrqMNXQc5CZ=CB90w(!(sWTVw~n97z(?@h9(IMv5rIbNFah z+Hq^os*AnH zx-oboc7Aan>n6WOgKwY&X}QVVHO)LUBIC`f}fNHq#WIrCHF>!;_AK@JoMT3M;@XT5_t zJo3vm(t9y2x&s+8{lBPvppxFdO-`9bxVEmR7?oUex7IV0wFs5O+oS`lZSW`gMSt;cTf=pnwhn16@mGl~HCC^7!9H$Lu+B zI>5Kf{ZxLPw_LI8UB4m!0CVT>po!8PmFry#YlUdrf6`fM2(OwQeJ-3RnxYRtkha-E zOhSup=bK@1{b`~7;Xdh#W(A8)n!Q;$SLjQI<<*hTEYeR%OnAZ3Kf&OSrv4R_M^U{yYdkhWu@HN^bW z-Z9X{Zv@vtA(N1v629?prD^Uctq41fBX4G)>g=X1k^HEN)8{O*%^=BRE~mJi1XIYB zjhv6bnhyXP4FvXD+jZkPKFVJ6=K1EfJi7hlHDw>TS;sVARpLn$BWn6+1GB~@;~>29 z@IzPs05D`OVUrvnxNB>PjIzZshFL{HK;Gx-sHobs(~#{^eomrgB(BKjZTZPN$8c`l ze)!%X0Ae_r$XJu&zBOf4QV-yvDV2A>#*dS~z2aM~WN#bycxfY!(%M!$p3;Ydtq$c$ zD8t1lb&v1ex7(Mw0lGC-IAwKu?pASi#vvW$w3UKKBbcRV8BIS5dg_Q&S&H<)jGQYa z(tEUth-fl$$7=SHEwn{KuH`)&dUo{50BurQMUONzEDyN!^)wXmtk-!b z^y=T#r5jAiDr33z{Jvmy>|Qsj&s~|p5%9}8COnLmkN17I37T8;Qk|470cY$Vu59V1q8AqyqAJG zJ*a5SHo^BHcvshR=S>}wM#0DyU;o!hLyu`}tfQOUOLLUhMi@j{I~W3?au~Q?Uvns}HU#cYoU1BDcZpLm_55;Jn7Y&XLSnevV!C>qPE}> zzP$?wmy+TVhUqQrQu{53_blW7jiv9jW@!r+1n_OZ1bsAVKuENDt~2PQu{pSIF&P|f zoH8}gLT#Sn0melwP7NCo;-K&6%``dCeaMn@3!nPaYKkSc`xMlRoj;$ zs*#}pR-n|KU02(=$D8b#!=@IK8#=9AmGWkww_A?Od?L1ugs>8@!Yf`YNT^<(pHHs4 zO=h#K70i29IT~s6LU%b#J}T4pINKPcy_kC@;Y*fguZY&W8v5y!TC7h^0i z`MU^i?GFn{?j(xdu1 zM{=w7DzwrDD(V7{6$g%+r#^C@2^MhZQ^tg1Wf+3U0&cO?`<0xqLR%A(v}akMxD>gK z=m1tE)zo>dHD&v}j8g1n5|;gq8L%LM8GZ>ho9Y>UuX!YQu?w39P$S4dq0!Q#b*GJJ zaeIw<5z}>*$?bE+lQYA`d9+IjOjfW$!GYzF@T>IRp$D8M-=mDUTUHZSA(EhIs4WpM?nARq!6NjsSp z)~iA)Dd$yI_Z~yiQG`T#EnK%*yT?x$Y*s>4xkr`&#JE%jocSuy54-E5W95m*mo1bh zuSKLjCd=K!WpMXT8(lD2EgBN86w-#KoRrtd8l1blVd-FiX!BAZ{{RjRosGmvX)Vx{ zx*M5Cw#Ze8jK#sp0f^s{5H$nbVov`6B{3~ zDxe=fUpivG<0CF1LR5q!g~;VTKkS&^!C+-*ZRTTbF9t-pg;)m)PuKm9p0c>+R$(pD z@&XcFN$8&<+vmFeHuD{SZ!Lw;E~~^Zu{4|gtPNI&V!!+~hRDa25-~j##~J7o9v<6q zY@fMlh%wRS0;H^quxfgOJ!$mPmP6eP7l-NY5e>h}a<#>@u(GGJ6)xe85kuE@W3@@! 
zW29w`IE@j7kRj0}m&s9CUcK~LMYe)cP0fn(CAks6p*0IrUtIth!QB|f5{`+{hWUcp z=GE{rPO8>Y7PXOB5JUJ5-bUkkX}oOn$BfEebT*+F#qvFzzx1))+siOf`=~A4ji1R= zLPdO3>eh_dk;H6+I^_`LEy;VNFxtTc7e)tT&NO$RF^|-NyowR)=TZ^kbSPt8*U@$v z?=o=TqpY#}UekTx(a-9rJ}3^xnrKeLAmlRfQljEpdxHtJ_hFii9N+`Q`l%k8?8g~6 z3J#<1t!<`?b!1p>pZ-kS4lPdy>As-Bms!%bTUel3t*?36r#59__SBt=D|!=MsicoM z?iBci6>scVT&=o4y>MPWH!~=3;8d$r3I|#cbr{+?5)}B9Kl1+o#JEqJdR9}b2#uipI>fGmOwuP>8nf`NFGk38zmCm7w79g9$T$va#$SQfwIYBZe+8_ zTM)`tSeuJfA5Fj0TMT|PtP#4Dtdw_Col<-~rQ|YAjQ;z7D}bN5C@oQ$wj{sX>*V;- zox-^o?m+TVi6uEQd5op5BeMSIvS4f)?%cds%M!DakwUTiNJj-6ijBaosYO*v$h1s| zMiV1IrLws^uUwxC23x-0ELn_lU0FKXs6~;;VIQ`N#8Z_A<9BtEYxC`6a7hY0w>29hsHY+i^3sF(j;ig6k1M_qqD6x>1<RTSO1?+W~soVoJ5-|IQ;=ClG{{YhzA5-U6726C0F zA6Ayp^0sl@HMObXnCmjkLn&ewCWc@+G)6)L0yzQetMYD(K#lhPz6ocCpV(Lb(5<*R zCYsc&w?%It`{RbCBvlnnvZFoixhkdEze-bP3r)cqEC?~xNW?OqjNYViM|X2I zquj?Bdy!@mG0cI@%~xt2Lz5to6dl2a)_coDd#uE2t1Nejj0lmcsuTmnY&`kYqbJdD z1Ge>Z^D;u)@=2%33Wj@uW;rH{RgBRjXi_TInBhha=ZcFz!?PzYDlr7D= zEuxhq(xCtbfEu0a>#NxJ5llebF2_T=C_7JxE~US^85`S}FxHa@=f93rA;Tqk1H_tt z^3f;V;s)BO6Df5phbQaUj8}QKu*JgHFyqJW46iS|(w)jxszBTBKZnytH%fFogBfg} z?KMVu5z|4gsiX2IV{BJPwrR+=$z}nE9|Sb3A^l9uJOB-U8tXgSF+J?5HlinAk435P zIl_u0#f*p_C6lRUt!}zrBFEeIp3c=4HgI_o2_>U7udbz?RH2b6aeY}A1Xnz0a`1JF zdu(_b2wchoRve z8=%bB$F?>P9Pg6<0R6J8>TX&vtZ7#v(2BJ@f3}*dK2aQ5bQqK^+PWQ`?k)>QhVv)x z^J{C^QYozi9~>Gmj@3BB##19?=ONia^K5?osxC4|%`t8#?}~ws z+NP8wkHb@+@#?3cW#MNXwM`rc`D=4!97t^5-c&U%z?MH!S%L681ElBtTo0mOdi1Rl z-8XE%%bYF6s@r66P+nZ|0A9%~j9pIrT0gWfricFk3t-Xy^&tmD@qd(fY=JW_4~EJK zNgV*<2M~lef%xfZc6$d^*78Oe&+_ud&RA09H2&ZJ0Q0@iB>;X5!+jPwR>PyE{iQvT z{{S(1t^JTlt!^z?SKp8_NL%n6814D$Hsjkajq_aKRC9ll{{S6Ii7n*!!NTl~#DGYv zdXfg-bg;p+@A$89DmL%@hxpw>h^}s3C1Js1bz-N_CI)~F78uqM^sbIe(tniQYRwDV zW^N+n%R_LnRY0cIMsg<2ktFUo%x@L9$$!sOFy2&@Z zdeC&k+gyKDq-mCrEj*qB!TQrx8F+0ITmy-HT@>RdD6b*kBJ0)?UPh4I{DGOQQOi>B z<83S9E8?f=`i|$*NRHe4iTh_tW7EW`8EV6Hss8zX*P|nOl9KBu$cZkpBi%`mnvk{q zHT9v@#HfT}#v_bJMBPV@pz0Q)W!g-fx1YDOMS?1ub!PTol#BMWQoDK7ziQQcZ@*s+ z*JYVxuNG

C;vJ(WG|y5e`<)?&BACHP0Rs*&$;>%lg)WrMBE1hpw^nh%nt=*BBnC ztmZE`Ja+aO2xcx!gj+m|v8K#H`%3xHO=HA;s(c5E+!?H_mJBjDd&RdXLVHk(k2EY< zka`al&`od$(n9wbsEB9Do&wpfUe)Uqvpf>UR573>R5sM#l{V8NBd+RE;#95{MYOQU zUCj)aX%t{3ox@7u*_8{I_X3TpPtLjp)rm`R7o{$WTOE|UEN*7Pe%JSO;FG_*3L5>T zVCC>^{0J5!L13QL8*vOv%o+`JTyc@z|S*Z=AI8qs=13 zXh*clX|SbhLti@7)fmdCInlcKrg7~9p?A@NI;|ecG8uMDoZWGz-qtH@6`kiOWO%~W zWUKK6kUr!2YFNU!AJbx83wD{{Wko^&wyMmeNe+Df03QWqkA0rpLrH+}KX-~4w8qZ@+GTN>6`MCal1Ooo zy0p=d=8Eb1sz9j}`f00W88DDw0-~Z;#=|Pyo?CTAmsuQ!UMmcCM-G-*xv!&pe2x2? zRaGP&B+J4zO^s{m`0BVq74tAI28_2x#7EYt?gK$1DA>Gl;oBSBVE zL0!(@O)oKP>g$|7)h?>`nu^^h)Wj=JV9FDpv%<8>a2QKcn{1z(4xz4 zCz$L7VBw3&t*0(gnW1wRj{}R>2*hCu$#N%MA7d+h2~8Dl1eQ~(qZpj3S| zE+e8Pd$JspEiJ{o#ym>d!ypT8_N&N76n&0UjS2HW4(DAIM9IX1{834Azlhl?}y10TX$95cT;Bj7jKb=|0 z8*7#GKTCz(27xax?J$yG32tw35W($76w%!KTxm`q#uS$NlEZB|RPF;%e#OvW&R3kq zE2-JMcNVz_GI9_KTE#NP0OCH{`mJ}boe~nfja&sGD4vVd2d~X;@B$JeDYaRMm-}agT8!!MJnd)mnN)2?I$BOe0B`=J( z#?O562(a+l!U2vetGHCVQ*t=e{{RgTO3y#BaiXQ<#$e_#$g^N=ypWMyhCBJFXI9(k zu1?u_AF+3cTK70S0f~cUFyQ|HJ0-+pYVEf&X`%DG{oi3w;U5w9sXJ?YUA>~LXAv|> zu#!1qV0QX*H5yvMZ}}gvsF4s>y32`qg355I6lqEbpyI>OmfZN%#E*jKrC@u0kugJa zae|Htmbh0I;M3^D$PGV+i3#KnAF!x%J_{X}UigagWSVT&7Q=)pKW>*6g-H66Ta_uP z%IvN_pnk%JiLx+t)yG;};r{??^CsBoqLwZV7#?{CuM$3+X?o{j)VTQ}lOze-I{pVS zZ)z{E@L0KL4!0)${s?#>6eRxuQlJKeSMl?kM3Pk z86yR));!glB!Ey;k(7Xach%JymXgwhzQUy`MqWiOrNhsUovmcJvb(s7K+xP<-&;zp zK}rT!AnmxJ(?0(IAm%6RDse0nP3@)eznV$pLo-miTumCuN^BSxsM|$>h>32yp}d{e zKil54JY!)Mt2f*lR51d6gqjU`;x%42Oz6k#MOOWL483;#S#x$Tm2iUA*jz=$2$4%v Y^8H19d}&!^SZoaw$KX`>hTm2H*~b61xc~qF literal 0 HcmV?d00001 diff --git a/cnn_class2/styles/lesdemoisellesdavignon.jpg b/cnn_class2/styles/lesdemoisellesdavignon.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57ca08ed1e7897336782898a228b9c43af00e319 GIT binary patch literal 181282 zcmbUIXH-+^`#y?i#!*L6u>k@?K8k=y8>J~NpAi8OF^bYlMp0S_8Bt0g~z^u=@|_ z*v~(H{qgIbAI^e)Jodw$V?XS+fWW{!_x|tnfXV)!R>@(?`|x}hl?e_lMJLN50fEpk zMG?CG@b;Wb$qv_F%=me!cOpa(;vw)Z%5VQtBllo$xC==oQj4LAg6>_=HELu(!p+d_ z0W23kOHq+nDV#PDxAT7`zXXD7i{zBVYc@;Hkbp= z#mhLP$z9NKw9VQIO`+tOl&c??_BGsX<6)L;b!8Vs^}nT@h8oOPMH?)%1fF;mkP16) zZQ(7p6R0%PzmAH_x~&3c!|TR)ODbZVfb)G5{L?7;jY&uiD~u~F#KktM3;!0UO+qzY z0(Z1%EN<~oFl-e^^$#lPC>swwdKJH%SusAu7v(&E6?}jGOXMWnt-ZORG>roD6~kJ2 zzcq}b=5Vaba-?iu9oGuu?6TH1VaWWLi7*B)-!bahvce+R{wb5NKV!q9pS4=H<<50| zC@%;gg_FR_46R^uoMI$ykKH7U=t`A9UVmVIkHGaHrBQV`Y6Z%8S_Vgl9D1^>?PQN0 zRbc+?@l4TdjRcNit?z#a&PxZSF=1BW~;WvFgIx3-5c8Tlpqkf<$JO4YQd>h+Z3ppx%EMnj4$tUAoDP za5@pbSb3!J@GN7S7)O$3(!0n$YU;sx1H3@Nv$nKdkipwDbDEp8^TaRdMsC{6iHn-& z*ES3Gxadj2$R}k^>#Z?;^c6U}qL|>q@PTip`{c;a^P)Hl@-rg~b!zOO!U>m}Y2?Zf zEYF74$yvBP9wq_5&QOzzR1Vii#(aAIrexr^kG~fli9nby&J8*FH-)Xpb>4@7S#kMZ zqJg6>3Qh)kms&{cEAlOaUv@!36zR=k-f3dAnC?MLE_~h=u?y1s?x^8>K{kWcQ}Yh9 z5U~wu@sQxm%;<8Sz60&M|6#F{FoWtlTVbeXoEMBm5+r1I`5x&pV%yQ&1|KXZJhjnl z#k|UQ%xHBBu8mPdO==wHIaT&Cz@CQ0B>vP$&IWq;CYRy<(q1y?S;P^GR4E4-x$32iO>)%MQJn>ngrR&}B7&|akF4fFK6T@n2=Sux<;8oHI za(?uVoLFSY5W{dCzS{F6Lm}^>Su6y(j&WB5|MjhdW8ZI2tglt-WjA~XAgm64^5%BD z48l#I$}){5kXEnC{qVO8Xf}h7491?Q9WeVIn0xUjhyhs&F?%vI>So~ZAFql>)|#5{ zXJS=8kDrwz9(12fc#&mUBBRRHBw3$hofjBt&pAQzy%bg1E9}XUP~~+o$+c!=t(awo z+zuv5-BxqVjp?h8l$jA7>t7~=Lk8rq9x5t8u#xrs+>m+Sxm!dNvboK1)%O#!(9N*t z_2$1bTfN;aZ<;tzopf3$g@w>p;+>bG#(qtnGq=|2uDt1g0_oUx1!KCXOtre&EvWTg z>4x*`z{vdCbd#!bZo9dz~6UfW@IIf$4U3xBi+v zGcPMk?3;d?CM?inDGHybK!opkW2U4SQOHakX3=vJ`fa;ZXY?R-vjRyWeeB8ti)SzfvpXoCHP88Q)T@I)AZ9P3OeKhSNSc1tF6i(j8>6O0 z=%u-hM~_bBv8K+vM7@WNm7M%?pP99y4R7OUH`reF*S8#u8Zso0)}RL>PITVMaLf}h zoL;-VMyo{dtj46wakAka*BZw7%;+xYAJXmZIL3{NyK#m2&zkfO5tbed-@HD3&yl21 
zNtN{E6(w4X@mihn+kd(}x@Fya)-$Rir0ROAf{u&~t;;M-qYqnE#87X2yK1#WC*{pmEqr6{_9SVG|IKx8Yd1s+>)@va$8Fm_d5q@YW zHk^Js!htNx$A&`g<5D~d%o~QlaxdC0D7#+%BWP3-a1F=v;WRVRu5yJ+TO_ywpv|+%N&a(AVP``qv`( zokh_OqH|1_>-OLgu5o?%>#vdz+av8FN7(yCWe(UxiBSWn70zG80DU3xoUmFj`DQ zNUp|ETZl@>tN|>u|BaT3OO24NN~0Q>@`I6*K%O*8Kw~aL*T9~Sn01ZolvOfpE|dA( zh9w)PtwN~aWl^zoj~G%Eqxfs?ooWJ1|IR1mHu@!$5G`lH2y;p{{hO9|c3x?njJ8aA zQ!_Vv!Nz9QsE{Wgw2FSs3qUcvIbSzN!sOABqyAA~#jBSrN{#Lh#e#`HHjKFmdl3!H zf}*T@AGtHyeFJ}uHC1Y)Q~VqBzP>^|K&{Jo4X%DVY;RI%p5GgsC1ip`e!OAPahrgn zUCiVKlZ3YOdJskst@rz5SwKhqB5?7H*(*>w?Sv0pV=-u2nH-MCOz_;?-wsV=Jwp|bc zdMea^e}Cj*sAHo0*lY`0|Vfux-9b8 zU#uCgFHDP7NCIiZ_BfE{?fz=cEQRcGq=c00##l0D!LldLi^s;H~I~) zRcEyk=C%v^Y$E?pC_OKc5>y__41p)O1&ek;Bm}#bL`_*Az0=hG>J34aA4&{HY9^MA zadSAUQZsAA+E^Kfrok8r4uldyY1p8oBx(y7;a^hy_wB^R6O#OF`WjICwhe)|hIc`a z%6CCk+-qE+`!1-N;1eSCyB{IF(8=3T@QBT*Zhg~>TONX_qP3>U$mZDh=3n?{+b=AS z{aa)1f=>H|9+nlAjg?=d4Wt^cViB~dK2>t$&-olgP8EfqvkThF9uO-wm9*t$vwMW) zUq6FaT_=K&6CU_iL!>QOvh0aPjQ8L!2&JwDQKFbqEV+_noM{%KoF(sf^Ff^%Fb{Db z)(=}cUQO0ztq<80G$JO`Lf17OunVqed!$CMl?`ax4w#PLQFrTI9Rgn(kGil6QjwTF z6%1aPDgT|Npv3AZzm>zC#1y20^MlG{6%H6K!;j=eCn|28Uwt_@f+=1onjk5%ferQ_ zIPzOJtxWvq;PUdO!>5eSQ3Iaw%2Gp;`}!`IT) z&If66*cD4v{~!8Sr3%Q$s3~XR-UquN`}9BwH>UwNMNgwKkH`E8t!%1I6Y^o(AzlEz zu(gG2>aY{(r4IZ<%(|DuvwroA(5Z?BM9nlmr<q@I7tutHC5Ine9P&gq#mkIUiP=}X_wW2n z#<-?c4!a;GoI*fK+!h{PCWf&0YbX-B7d_@*;F}@}{*%|^JE&9DKus`sNKZ2;Oo-xh zJ)W)}obcxAavlbg-_l2C%T~q)16sgqFPzHk3OS24@7!_bLzjEAi;)eT)vM{SqYZ9r z1wBpv=DkvVA>~laAIb`YnQjGF)Er=q^2H(7PSO;-LzZO_bo6V}{X#X|vCLi2&*V2R zL~AQ!G>1u~QQ^}>W%rC#T9n8YNR<1#peAV1Sz6IB>m;s)Ep^@n{ZKBX#)Pd*uHR#f zGmS;0!~!qNmJD%*L;DT;+z=Y=VK$puFWe(u%BYLq~8A!D*p z!>m+_ugT2Ct_)BD#p-Lt8{Pt?{G`#>?}?^mEcsvzFi$WTjyq-@{*)5WI8ZkjX3!tG zLPOHOWUOLVr*=U?(h6kMNYbonwDyK0Os`~@k9JnVIPIdhNCGey%HNX{rzj4O-}G+x zXgq!s>NtA;5pJQN@Eh^qs2JynIDT^dX2|+bQ~ONEOmqLl2eW~`(4RM)QaomJAX8)) zTa6F?L4Jcf`KKmJU0;M_?OorKT<3AeB8J)w7nt(>-Z`)WVYH$K4AH;1%X~`I{2><7>|NGkNsezm3R^| zQRZT5Rf@;Eve(V|174$6`c{QzuvP3ahdUbD%`2k{135c!U1(M=!#i)n#{3kki{K-v z##Bu_I-mSGR7>^AK{u^V8w8)6*}%*rjd$U}XYKxnc45p;jeo6d7Zi$8KTil3@?>#EsalFxMynf4PIXYeB}fqs39W{KSeU5qc1 z79pvP-$qhqbw#DU3zQ?1^Wh|7XyQ15e}v&7F*`(ek5-ka4+ohU-93n1?i?nZ+2$2F z2B3max>=RX)#sI`pSZlr9hUEfQY&j}vSJZ;eWjJ+wA#Y6PUJvZlIRzG!~f>_ni%Il zQNZ!cuug`>ei*fzu1N#lbQTN3`a{78E>B)WdGWZ+No5jQEqtT3(Si7yjCG@ zCU6%7Wv1IPPi-Wjiqli+<>h0JGtKF z7P9VOm3`=@HpQ-At|#y1c>FoOWo*?``-oE`kaFR3&}4A6Zb~HOs4b}!R)y-2XB=d9 z(&k~uAi;bvbfCgma~H(7I1brq!?65SrdQJyD|{g%W^Ppu+eM!~ZR6#8YM9qLtI$S% z3BpeatbjQ|Ox;MCye)GXr($d73G=S=U4mr|ijgjc9;S`WyxHV1h?I-!5;LxK& z*?_-4?;>s-HdPU^xa1~ILt9&{L9a0~bD4bP6>C=@V%M(zc$0EuZuJNZa%?lCCBPNHMekW-g$7~SyiR)VmECu%X1nI>|pVBkw zZXw9`DLK$a8Anzj%L_jNl@X=-K{I6pn`3u(L7Jgb389&BQVg9G)%Kc>^v&~?1(-6^ zZAOT>Y*E6UG-5##qSONFaqW3!X6S_Qi>(3=rrC<-5Uxoau6ZXO#(;isy3}E@il4(r z@x)4x0R(-^4cFPQ-Fm9xqZ1wDSH(2$;>96;$0?a@=U4^%srp|V698HuE^gJMGO6Tz z+od?LRx;VWQNbmy{Wbc|FxY90Ka&#iG56g=uUy8;YECUAUQ?m3L2GSpqFc2XUJIFC zijekMur~|?rLh+B%Ywj^wZ5jndwG*ZOi%gdb5d>8E{JCa;CqG<8oajRhjdA_V9;RH zh2PtVOXs$0C=wU{uFzlYUJIt7W3v*+M5$-0r>qhZW_(cog>k^?ZQoOFGSb?y+-Kzx zmzEoVoZR+?NMo7Tnc?7c4@ebmnJPpc@@+pt(k_ohWa-NVbbaGox_pm6@Rj~lDa)*e zj;>BJD3Yt_-?+P=11c%ndBSx#vpz_{B42G6G~GpOgL`=M_@feMA93RBIYN`S|Eycq zT+K!{p5@1!K;|)wFSWU~Ja}{F>&_cZ&72bl4#-HVu(g}uQCo!Vw5#kL<6P4)XBU)Q z6iCZkunqQ!&Yi>kLpQ@jlGtVKA542L&~pgK&2U>pz8mo)uF0gBD-QUzudq6CLi?Jh zM=04&so|H20|kK-rIqL&!ffce;OUi8O@@XnP%U$$x!(B6I?YAsaS7EEGvVfR4p+a; zCm^Mpa<%{XAjLuyJ`OXo3u^R7Sex0-M%;T9ZrRtZl!bfKM*Djge@*C6^BfXYws%4GckIf1 zuP^t6eZkeSy;$nE^8MYrv=U0sPw0>bzI=X3r5Kqvb=bLR8GBPA)!ApITfNCG-p6J& zAmgQH%THj^(t2E&W@w-m-)k)f*04HIMvsR#-(a?t7WE(_;WU-#9P*qKK+MS)g&2n2 
zRxPj&jb*pzj_VKTH|aL#-3&?#uL7Jn{?kjwNk6L9gz;O9aZ0oBNT9)~R|_9yk-PB0 zeGM#V4m9+u~J4w9DXj41b@3KCFLcLt7dT88~3mw`dMB zMSs>7bDK_gBo|;$QCH^P!W@^X#7|iirFR^Lg4;EaP4V&4{Ih0ZA&~YIZq#;HlG(9u zzhoKu&zj@ilws6ruyHyAga((16mc$xzo>V09sZne@g|os;Q2|Nbsgg_{r8cUPqiv; z%vkweSnKWxI(lnS@Exj%K;>&^=kM|3ZK)8#c$lp0dfvDG(jzU;$PSr=1xs>kZyg8?kGb z>qT`LyUov19T3Z%bof8b!O$X&7xz`B^N@vcs@W;Hc&Gr?Lj_(urnAk1t-`W_qBDFI zOSbvKJ{Ppq zv!b*NJ7?GvL`d)9roYBJ%vou#S|UxGb-~$@bvb41`Kc)yZ^MLMd!{mw%u5x0YClTC zXO)EwQx8;iD57ft_c`~y;E7ioohXi*a)|%vA6jFbYNFdk`lRk3#+^#c)weC$l|Yl7 z5bkjpUh1m)bCwLSiU%`8?(wz(BUT|j4ZtHIL7}$%RvijAs;sH2zJ4(DxTaSt7DTg< z+LU{nFogEp7=+n{L(fPc<2he__d@@mL*+;@AnGnLDZb!?e6&rhi21LWS`7&}=8 zq#T7x7f0d`>$5F%=5Z@V)fE90T;3p6{TFjEjyo{?&zg`-{V>G8;Xk3k4AB?v&fr=f z2?W<>-{LtVsAe52Y_fZ4Rh5Vj5bGCvU`r>bAJfdY4$8s)ct8$AZKpdeK0^I#W$vRL zA{baWdL;=I#B>GjJutUyU;yh+3qu&1SPPoMez1DKZ0 z#9xjpTuw6w!dKc-(I4G#?3HVv12+KoBK}t;zg;n`gGh!`A_)n5$^^5d=`gi}9F+YJ zE_X*oLQ>#|N2|{AJ@>0}S#1(-Gkb&!BqY2MZKg}j`$R(gThp>WfY^tLdft=QCTE(= z1i7_%W?0bZ!P8o@X1ks|okI(j0&1}p$+q)#I?pKWpEYJ^Xs?#1&X!va>W*kODQ&TF zY8PeM&Y5%r8*OR8g&&fNx9pHi;DH0+f^=3B|oT8%NHaA%~2 z^1qdIX`g2u;j!QTkcmXVU#uB}+`9(V-a`p#Y3)H@M$z(Tevj2W&-2f2QM#ll%ZuWp z;7R^0$w?}LKa|`(@0~^;W%D55X((c#|QpQd{4ax2SS=iK^iZ zPq5qv451RBlkrCO)=$-bamRqg*4HeZGrNTjAXGa-(`6PS*1!E}h;B{)JddxonDp~? zx+Bg;!@xgDNz;2!ddTkl{_ayaS9{Y_seq3anY9E!HU68>CDwQEfIDFJp#}@1{TXQ5 zaNzMaBH9XUqzJJA^}Exl{{EVr^z7*2d~57+Kr=%X)ca-AVynH@7&6xx4(_7BD4Y6PXCBwBk@z~dtTlA-6HWbX54EaLW9wP{(bM}Q8-12@q zl80A}RnWE_blr16^tJBLp?}sC9pp;dJ{PtBvj$meRDoQ)-}d4-U~QWDDamftQah=K zOL3PVFk`Z|JiP(!AcHE=*6~A90mo3(DEh7i?4R%_;0p8Z&8Nv(K!b z-e62k!>|zT7L>(KyL$t98uh5gbFWo`jghT8Uv3+JgGXKj6t(;8mv`;_{`jsAH*A2< ztRR2>pXF#(c`5hp^jSiXKw^R8NLwcIaR})RgdxuUrpViKJfYCNa+Gg81}e#K&VDh{ z*HbM#LR+8|@u=9l&cYl=DR$$P4}jA@MopJPQmytPTD|tvM^I#E8d(B}W*G&lQP%H6 zR(owH$!$sf)??fq*G4fi(n76BgGYovSjFA!#`15$-ZP zX%R#;*Hq+vPN>|mbC5808H`e-*vhsyv+%1|U|Gp-7=hU(`T}hCqu%KKNSdLRe#{U1 z^9bs-s@>=#Z%RuZMFq15{p}tZMy-5Dhip(w|Kw1K?_?dQWl72{ z&%HOs((xz|5kV^vujti=O0TSjo0c`;Ll_Y>@Vhq&i7XpZ)7@CIYwgGz9~l1_DR|s# zwSL&ix__W|zo-!xc;zDGaoG~#XXWZirfv7T&m_f*gvG`enW?N@8hh!p?I5%?+9nP$ zT@&%KOCZ@jvtLVIw6Z_D#i@y~ePt7(TFXZqjb>;z<~LdhoZ~5(hv2?!%OP(6cS^0j z+LzF9IqBw6*;T^EDPh(87RQ>2NUq;rsp$P6p*cv}w}hXaix#<6?Cx`94z8FzkGPrT zkE%Op9v|PR?8C?=l<6WX9*=PgLwnu5=lL zhbhO6OW1^bsnM5Stb4_fqd?y2?~Q}eO<3TIR;y`XOL^*w z4W%x}K3vVgCOJ&Q58pHuMNOVGzlDF`HM`bp;2s8f|9J$}Z8`|dMCq8QP4;Jd2SyAC z@zq#GEj{k{X_wxY!5PAo4vCCV#as!mtamgu!0Iu&P=VHFu2t$yJ?V>NwoePfI9S2>4 z2|%u=d2vaed5xvWCsMf zyrS1P`A?7+CwNp3Np}6jYqyQ_#D^M+!j>`TT)^OOKDrY$#`h^+EqDD5M+$m$+D^1@49rWZTdULOSiFt5P|Zw?obKJp?14#8E-u9TD?7vl4*W-HTQ)4 z%Prk2J^8c0m`7#im~RTlBBtM6O}AnWap8u{Kz=>yqsTi6tNOlFbWeD+x#VcypCzYH z%-BNzHEWBzq)`8}<{pPXo8O8UVFx8I*0AHX`Ah7zV~9MtJ@9Jd-XAy^dh`yEErJO;J)2&zyy_AVfTK_gmgE2%rBT0=CY1=jtwjgIUHrxlE~R{+yO|M4TO%{FQj6p z1oR^8Q8bz4J}V8{Oh_`aH{&%eeSW7LmhFWhTo*2T{%A z28~0YXcrxkGEtJ@?!S9|GFPre%pXPTM1GS1zDt#uy98cet}l_xPvm$V|@Ez9XRw z%kE*922_DCt_=Asb5wct+0g+z?sZlaGXbPYmCsS;UOFXb=cquA8%0$10jZB$ut|#b46#d1ryu^H*+7X}038+5Xx^Im_Kk>!;L<+noQ% zNT*xXfT`F6QfO(hZp`mLoQUh9dNu>F-2ShO+kbnxsLyQ`vbTNN-(~L->M0_OJ&~c% zM?AVgm}KUg|!>I%T@_1pfozh!SpnuVCae%@kfj z#Q2#X1G1{H{Mud!q0O_2VrNy+Ki7-+1^?Xyfns3OCBfs0MMDt7wv?P@oB|ya$e=Q6T!X>T5*s z%52zMa5rh5M(QorMW{s#t*7jAh3#JdmGX7FJr9|^C~9l#wo%pA!^VQbT0@}bD;u&j zbWprSFzn7ul6QO=kAt1w+&!(KE+Os9!tS;lPr~dA4Ahx+Lv|Mon40 z7LI(=9qYHQ%N9MIbq4H|@HvXk?~BSb@ooQLsx5e%3~3Kr4Ym zW{2-5<>z8s#K>sf5*@cuxCKmFoA4Izhib-J9W1yYu4qm$f@mKm{K`C#V)@US5(gPV z&2I6{d5fQWKFGUMFbxEYOXDNluP9QCf}})$r76uEn)W&gkxd>N$D#=6WL!XFeL7xd z1_h^bHF9M4-s#@qOl>AigF8DrzyLm-h!j7g0j6`&to!a}v+s(}2?bK`1#tz=#^e;- 
zG#x4bXH8G{wtZnf9kLSX+y`DqDh_6@BEYWoM;!Ey&t&#fY!;zdKy+J9)@@N~|7KpO zh+Pjz-6P)NFAoSgMU22PnT;Tqt;Y^g#|*I} z`u6#ZpV_ls;7JPZmN}}sWJ>&_yPIdTQNzWjifufuRF!4PJ2d)Nr!_7#lk8x1X}J&! z)BT&S*pQ*Q1|oRihIL%JYGM2Mu%1mE^?$Mez+w+XPtcTyppUoV?Fs%R-P)p?mQZbV z2Ky}2v-msG-NgLzC!-<{c0BerDc1!gfu=|*TW07-yTuMG!4__a^6B4#wTquMg}??P zY?0R>XG#xhAj-oNFRx#cBrqGWQPAqnJq!V?yUr7jQf?v`1mo6$H4f0+sHay|4=-N| zHxCL74Uj3E7}}yRt!w5a>Y>J_+ArauOEukt#fh?t#AFz_jrlA-fgi)!4`@sZY$Q1^ z^4rpdakZ`sF;{vDfyViZ6MHGX#GqSaVu}DHAA1hQDLR zynEjJ0w?zvK!{|W1}INe#Z!r3JaqAN#g|u=9@AOuaW`x(%U$~hfn7}V`#tRsN&7=8 z|GT@ZQ}uk+!Gw}&?4lWPN}^v(ZuKgh8#j*cAVtq)-yq^e)WGLKoI+_x{7yUa;U?Er z=F}o^jxAQwL6?6^Nfe?}tO!Y`H{W+l9`mO5))BSCUMg-o$_RcKcY#$`Cq73SbLUdxsqVJ0QL@|HdXZ}LPFQ=mJbK>ScE+5}nzw^G`vs)e) z8Vt?sAJeG7S<`QrgmT*wN#=cb&oY1zm*qRb4=m@{L zade&RDnvRvjHbWWaU2cNYgf~vIuXM??6Hi?>hkr-vl)hS(1{djlx&YbZ63)xxrwG9 z%V^`0M_5Mh-Va%?=cS<0`f!d8(tyMPn*6_mfgI-O-0-+1yQUu_rlZ&Ad}R&0A3X>d z(n}+FciJF15gY-;55Nw@Da}nnc(`AE9$gt>ZH#1v!sjjx5Te*gB~oHK)OrhC*c@8E zXxES^usTjC1uV6hSa$lI{F579TcEM4TX#&B2bt{^8{^N#!kPqI*S3{9erXitS>*+23vPuuEO&5V`SBx7GN-VcTY?mSRdA8?r^CJtp7_Nc_I*~Ck|QBb zLR-afGy)`z*1s8mj5==cc8gjk(YzVu*JxY>XVu)<;$&#mu-O33X3eQV=>KsWWUDpME=+918COWw*&E!)P;zn;F4 z+X)%lvdST(qNZ#rK%Zw0U5<3FlyS5~iPjBdU7T`?k)z0S{X` zMNa-_%`eNeB>djpddV*%0LfkJsHm6H%#1n{j zBY?-Ex`&oDzWyrJeT7*7dp;#0C@uywUvSwlK0OvR)& z)DDZN!G%ivaaL4LpA=gLLPFptorO-xgZK1FafUGfk3Ke+JVY-ozNZfCw)TH6yiL9Q zTA6+ax6jh><7ll-1@n?D%-ftCLXq~097Ko5DA(`Ko>}ebndO#;RMQ)G zO9*Ve6l!5^lE?BifP2w}Sl*^~!g}KrjpNNIs;46(a45RGcQ=so(MN1DF{P&3qW!VR z1)HwVq?Tx2(bF$Jy=ZgRSVNF6r*V(NJH;EE?(mTo|H329hl3?B;mkvwMJ!5vu4%RT zlNv!l!@6b#3tBPQyf+ORr}K$Ufgb#6zDUDokI~3>wvwbiQ0oC9YT;y5pZr)qV^PeV z&cG^wKWAh22b~q+*YaB4c#g5*AFXAIEehiwqW+$J2NFXdq~N@DIE%JY<7j`~lZI01 zNKKloc#C&}%`T(+KEsKb$qNzt$205DEGz$`@g*HM3SD0$Oi2)~StF5ERa3z-R;-e0 z+xe1V3p@gy9!?WMo>biEM_meWKlN2YvF)E$@gDBEuWk`t?Rx~HDm1hSW9e*O)7K%f z#?VQQ2Mc=@gd}3E?d@gN-?zSu*%!QlN$7l7%FBCw)t;Jeja%#Njs-@+`OseU_u9UF zc`U>69zGaKd1@|9%wA59b{(c2dOCHRkgd#rB|-ctQYKFQd~HAWz&I+4Jwi=~HOJ)3 z8!?mV*1P>dH)=Qc9h-<)bG_+!3qVi+KDpzN4$_F+`EsEBX31f3-b3_+*36+YqX~|( zFntw?-qTbtzJu?VPK~{=%TF`{1Dv969}Vf)a*Bzr_kWd|a={qwL=il+pujutX6eAQ z&`V#Lc{wQm`KXJ0)Omb6Twfw+$=uo@wRIhgd>cmca?e(}QCc;YlSX~|=;*fp1YLaZ z&I7^meks!G_I%GdoW+FXU?-3Fe@Xm=$bP zuJ#EXGPd`fPmq76^)ePH$huhfcHXe?XK*oc7=5UNx~7;vLgldBK|PyEH$U%8V3$@@ zC^eLyObV{^sltNR5{!dbuhTDaQdAeCvbkN6u5St@`2>BY^VXt>{6p;V4EIsa%<1{z z;k$OQlsd362K-_Xt@%R$s!QCp77ngGy4w(Zk6^^%P_Je@GU!X1R-_Z(7Tv4!*H1Hk ziL-X>br9Nh`$?$Wl=A5z>MLFY>#~fHg3{xC5?g;?Ja|5XL;O+K%EkBXU7G7dlh8RH zcnsj(bNfKHny0+i=2B3dzwTLs8|diU+b+UCYR^47m_bPqJ#+b0x?z!SMs#F=8%J$@ zJ>MZD`u|aME{;t1@Bi=fx$nE~j_#5au}|etNvoVnvR`$Vkk*M|*ey8@OU}&py(=W- zme`HhT|&&PFblJlV-BM*vYE9x<+#mZ2Rq!qSAT#G-mmxjbzRTvc|D#EJ8Uk|qS0eW z&L`TAzfM`$bTw9}^VU3UdaKiAx%d=%$dIA}(qN10d%hK-Mdo2I-Th0&+TT>bV1H4? z`-)`(mMl$VaPx4~sId!wWfU<*d9K;<{cz(d&E7}dgajKX)f3WgiT6yZ=BKaU^J+sA z=ihh$$dGq$6M9_GYidV6BEetEwwR z$A>UQR!O1oJ_aw_h3;(ACdz=n{OSTO2;N2%)ypDjI49D~Jd&wn<;=PT7%69EZ6vA= zPQ-h|_^+4Ocl<_VTLJk+TIKvRb-gThJgZl;rwX+z*kUTIw>79*4+hCzS5eixm#0>L~CEtI>zrhSbuTa)Lnq>Ult^SWTSk z$N^Y>7&WU$J&F>=Q_+rU5Z|Ydf89cEQj!stAnmuKXYT`aq77`Vi-dPp|6_NhyUXgu zp;*ahUUZbfztP@yZ|F)R^MU>KCt{N-$Q)+|001Z~Y`+ zcU7q5HcN^QW&~&!6SP`+TAjs*QAttJ+Eh7yS*{ z^XrXE6Q+fK@E6n~^)Kj7#!;*fY#TQITanw{_WGA3)R&<01U+M4abZ5Kg(Z7dvkK;_YZgsj748-whpUFzP-l3g@Bhp}CNtbTXkp$= zu?__gFdoFkaSR}BdQWpJ&YpYDi8>kF&5RFlQ_%1q(ArX=JfR@`wFV~U6(TQ;RMP6v zK!(;=k)u&>VXbDE9rD;)@=hTo7h`j&Ja{>2%0GItkG<-~jQ$Nz|#C^kkj$T4P)s`!O{nI8C6V@jV^mXAS0;RGek zF3{5H_>xt;A&MwEJ~wN>MTL{V0F_jlEzw=zOB`Agl4nxa<|`mAAWD*HOpAYZTqJON z_Rjun+{(vwLWY%j;(59`5lo84! 
zy!HW!tThq(+gF$UB9j7g6hE;mM@~b~I=-H9qX3lwe0G=Rcj&=iqF%>SRJ$#NEm%|a zb1Z^_)~m|ie9VaYXNe0F(2Z2mT4osF=)5JYBnQ+TiI?jpaL@D7*mWi zV>vSKD*LzDMRXH%rB%J8e>b?;yHfffzmBp><WP$?fxIf^idrto!g*~85Ov&P_9@5@a9}mU1D1{VfLtQ4+{nt{HTgLs+-4% znz%H=`OIh(U4qMpF>OTK6Bws6cXR z`01+X>0{#u*s;5_?XSo{WLz9EopJ6QgL=?CGk&^=R1-LUt)xsMZ8^KS>WFzxwa?OqiX$UcjK>}cREe)j-Y{| z{8btBF{;j(HC%BpxbgX4ty?WlRfa1_z4Y@EpBCjF96%D^tIM`#H*zZZGIkKIAM&&2 z0QT+|D;HTHq?=CDXqCE_Orc%OVk!{pQ<@BiN_wj+jmM4V*A*`l1-KsX$1C(Y`6+9*0M?! z5+0i5ELwBOFOKr#^IWS*juKjn84J2-V{c6hX3o5LRtdC)#@cB3*R9{C3!$TAtFu#F z)hQ>Y%EcYXFn;()jn{T128F{$>E1m)$nF|U9g{*|521Uvr`^9^Vu(V?e=$G{`ybT} zqcK`8+ea}?Q$3BOAZz?iRT4OZyYx3E%N8B=mDG9rO>%89(xJsgV0JrNV-}Z-Zu+vI zG8Qzzgt@i(ML2IVK&n~H#osmKWF$>gOn+5f-gC44{AHi=zk~7bh{*xBhZyFl`qBu~ z-eJ;lFyrdf?#l+$TMZlnhjN zk57`HnnFfhlX7x9Kq+eZ1gJSbTyjbMJN$0XDfbeyCDuOUu5NWqvR1G}w&)K&u)ED> z-E?7p)aNXDwiM>$zo8Q7OQJTy$_2xfNP5;v9ns3h@e9U$LTSO;dNygaIw#TA1C@QPm|SnSf^5|msFd5hk| zJ(#oPs2^Sx7k*&H0BZ2yR9F}@fjldPCfbOqk%6k3rx+on{a!9PRhBhZuoqc*6H+>= zx-hU*q%yAT#QTL8ht^w!x%q3BM9qj9b(!6{mmHBKHN|A`pVX;5>ogjk&KxJ>w**@R zujlYvGIyuzc5P7)|DakGte$iJ22I8th;yNe_JQE^p4yO(E*YLj8N7cr^;pLrI-c3G z{?@qiwAw1tm-!`QRntQ8(!*7Ix<*S^Iw+tvCwDGqF*K> z&63(PX+T}Yo3+^=1NKHbM7a5KgWz0zl(NI6o3yL~$)c!cAFB!IW(^mJlwLg%|L}R} z#@}JfESe<}`~wKRaZaUPh~tJ1&Cey{+S>Ye`l%E7ZG{jAT0oWv%3*+VJev80>^>F_ zo~Gv)FkV&wJ}9{H^nnKF{C05dXE8v(ZtzJeSyCj}e;tN_^yhJgA&$e}K=*x}&VmY$ z6SgRt3_Apc{h72d4twQ&2Epnj6bIWZJ<(T#bvd4J^BlMzly@4O`$1Uc+^q%G#juFD z^{~2QWXy_NS+^ZbEpc%t)t3J`XpBo^&S{wlrBUa5bzXhVyf!m#Jk|R~*sqQN+zBb~fk5UKTc7 zQFbmtjxHHcoyy#?nz~iZS(*})A6|{Q*s$m2`^*EkYPdaHmq$Jef+`~>?YGpaORq&L zo`GpwJu!N+HU7OD+IsjlTv$2i$+}0jXbMzUl6Bg8l-V{c^_)1wS0IIRHO)QdcQHYc z-X2~U{_?Q8xI#)Q{k=NT`TeC-S$_j_v?8>+DEdFAu$7bs600JWaI7j(fwO(i_|p6a zsjxwglV@!7@@r0 zaH^_Fs0%!7zz9)TUnnpOyL{REI2&W^iqAzvnn!nBpORfYdFF0J>{RwV?tkLu7yO_U ziJp#k={|fKiDeh@ilxpQj)`GuvLjXy=LP)C>+;$TTgpSLw37TqRi0~>#sdsas|TVf zDFw90IjsQRzEt9xTcTO?(~Fq4Td>z-cqIM}(ZcS^i@mNmvQh<+_4jx|A49A0f|Kl3 zo>RbIEHh69k>@E((!E&dEOGs)xNz!tJ79LuXGfUXx2JcXKeT`X=YBOo%q)U=U&* zE+zCxHzM6oxdrs+?yWmYBEt5c?AdmzMsYuBDvWOxgWA6KtKTr0f2UQNL<%9vVBcI@ zA9f;25aaeGI;{=3K!Wh3D+5|9Cn9mZuzH&oW(Mb|T+5}gG0R;%_FFq?Hg?aV{uO?- zhX*4-L5OygX_n^xrhlO#V+abaZB7CY2ci*)-a@ zy~(Mb@xK%u?$6nu7@oCw{#Eu`Y6oDZ{{7ZQ4d4MPdJe6zYXmXdum`mjJCDSnl zDOPBhQgH3{Iy!HeA)Ttm8Ng&AcGE!wpdYa&j=IX>-?1gfKYsquLEe3Fdm%k>x^nQc zQT9u?4DC`Eh$OuJ+KAR zk`b#YVtzb5jmkvm!=~y-fNGu(w(NuZ|6C`u0|#zQR_WC_@z>cA-CY!!2e#gG9W6~7 z(7rBT-2TYeqhyc_Ok%QLb~gx!r<4QH!-?)le%<@F6J7HQrkVOOD;h^>S)IDIRYd$V zf*KOh9(kUK!LiJ&0cp!%qd~=F8IL{5q+Wihl?4tdbGu1QlHnfZ@AFzLqsYtMU#bdm zOVgUQ1bA!FP44gF-34Pdg|4o2z=8gELolfqw|!yc^$6n(YP6Owf8yq`7|!rd_T%Z* z;l+@G=CKwOqO=$5hJeqzKMd^P)a4~4kqZvCxbbfFWGSgLxZ@ylQwsCD*yBdXv~wbO zKDrWc$1m*`knbWA*lX1G>3%wT1;ly8RYftMt7DIRQ2V5wG-eAON*D4qF$&VNp)zg* zPvglHqGz&x6DjHuwj){AzWK5wGlI^Atpa(L-=?G0jCV2@%qF=I@Vpah7{oZP5xz-f zH7b92vq4f+t+r-)OP69OhYK817QL+(Jr^{(^3Or*|2XsdclTCM@f`BoxC|Ez4?$?W zjDGaGvU6u@a>KQR2^VhEc+8X)NziPtl>5a52u`>r_!GmQ>uK54X6sQ#FBtuLb>x;W zWdU=6u?=45nA^tWGY)zCJH73u%lt2ljnT`7(}MundHOFBMC7Js#-jiF2b}VTPly-t>EQ%17*Lb2K`e=^qA8em*!R>OB ziBtGLVOk3!6QIEpuh(M^i4K0son@HF|16VZT$H~Q9A07EcCEzO1LhIpVycsP)odFq zx*i1nz+|Mp)x2Rlbfq!1CaH?t!S)BzG$TZrvGG=FYb&S4bb7QbIIZJOgDEs>6}AoS znM+Gq+wP12AA`bFKBh?&#`W4r9}MZSG!@l?#TPbBDB3>?e&^n?C0+OHKhK`9B8co4 zMz*gU=X?}n9$P~82Cr8PP~4DBkUUo~w?l_>Bo;7f0lhAI>fA#0i?@~y<>eBzxb7IKUSjct!w_iG@{K~iH8_V#jU$=ofMW>MW^AE*==Qj2&xE9QoH>dd54 z7DjQS+UFzo6PLeuR^z&0`z`su$_oI60V7e1Mxz%YYKv>N`O><)`_^A$C`yv|zVL&m zGAf{tCs}WEjc;0ceg;P-n3B94$A!Na`<0kc2Ras*Eg*Rb_dge#7W;j5;zZWu*Pw*o 
z&l;s_kS?;jlpE?w=yB{BtNI4P_hCnhsXKTh10%zDD0}QxiRqHh$7zpOSWnfIudApLn7BD*1f8tDh;!thF%Bj(W#l(npaAP z5B|~bq9Rbda}>>1*R6c4-oV0f-@mq!op=n8WSWis=HfIbLwj&9mrrxZO==*N#gkSq zYPNX%Gp2Hu!^_9M`dsN4AGXQcXTJ(NS}&68#Nih%Dt|3+E2BzgvdpV@BsY6cfLAB0 ze?8Dt_L9^IkpZhw(ipbZm6Z_WvQW+QV& zil9pBYJV~yvgR=VTNTBPm8TdrHqcOOVP=<^vSCpH<8Lc*1P_HzigfPxw1-v|HQP??4Argl9MW?XZ1TE&^ZNzZ z;3I|>TCMOc+}V*Qh1hA7RiS3R3iBAX6MIp}vI;$R+>?h;hjTiI=pH*t55!l;qlBK* zs`J9%sk}*2IEINb&JgYo3yZLj5=Zja4L2Eo{1&{NcqokS9f-M8e~BKK@I z^!Q7cPfd*FB_&u%x~SyKqg#f+pF7d0Vg;_&s z`HJ-wraLD#C*Dk7yve`HZ1ylx6rAIad$VrMCrC>xym;frlf8uPD3s~X)S#pb z8S$0+Q}HIg#1YMxw*G=gxUKI)diuze7xcY4TmY zYlH~&Yl>>cZ5gy^7b*qc{X$k;t4ko+A3MH|nd~`Hn)0CBkT~uswHFRsxm;*b_;9di z@nqNP$ac@t;@36tT}l=)q24YjTpG*&-Y;XQ<3(xI&3OUK+89|u^AX#Dio;4eBXuJZ zuANq@bHOWPQspg}XOZK>tUceCON`YSeS|#La`||5|8>bvoZcJUBWg}B-FuVkot1-S zA`#+ezY8~p^JQmneu%9*JREzbeZR_|O{3i!R-{<=tj{kD@mg`7m03@&`9am%2XE}z zVNq_`=TOcxWc^O=FU={W*Gu*E2VqaT!RmiC^v!^j-n}Uh*v!i7T!}WzW3#wJ=si2D zMhF(+nM}4DqV??VLZ|n1&Aw9a+>4$ACL`76tN4OH_z`Z6iD8DAUpd^km>>@$ZZqk7 z90Xcw*WQg9?))TA%g95t-g>k;{qF{opxKz&=r`aRY}ztnG4bnw3o-$L6nrmZ)HWps zdFV|{Ud9NjFZ$4m`w{4~gmFGtcDQEIzWiBgt}LI{?D`h?xHIX_4Gr~wG+IiqnR5$% zR(BVjzudnx9D0(qy%kG5s$S=RsT)CBH`GS^EBi>O7cCiwu5DVqJ!KmQi(M$fJXlpw zGcSw|%1d1_GW3|;`S7UbJ$xtH?7LV*7WPo%QUG_#>&NnR(kDQdS*MTE;1Ypk}Q%q zO%l)bm6nA4w$+7!Mie6?2o3ewJb`&qd=$;M%_%*Nmt9x(F~`H$WbD<_|9j!Dm0;@Tn$iZhe00Q_mo4Dy zQO&J8J?rG&|1j70WYrsC6pJ)DQ!=;2^e6tp5<<0)739`)ops$jU^8XYO6tUy4vmpy zL&MqEd1qpd5rOxFigMTV=y{^zUU5H4G5XwKi9CXR*Gvb*G^Y;I0cFtM90s! zioRlRrmNv3Iz;gNOO)KVu;h}m^2__2p5&Lfrb@IM-UNC!H#3He2tJhk6a-RJaiu## z?IF*e!>8%LGa4SxkPd_)qM&s4cBkPz86vEePxiB6lkv5pC`M@5vIRth2sb1yohg^> zf2M&R{i^YrNbRMCajBQWCp2r3IDJVps|x2jQ40#i^1wHzI$nfpj!KMEVVClJ zx8y)QFG~Ooss}L~6`*gw7SLi+o2g0S^D4ny80j7ZOb1pG?AGNDhg!n)_44l60`pEz ziSoz!_q=IsS(rNKYL+Wz&EzN-6+G{4v7hQilZ%mu!u&%dQx1Gt7Nh)0@>S@`cj9_y zGz#K5hsf~@vFQeTXWLe3BRI3ThaI0A8_iQXz5m^?+}}$fQjb`f!`xFeqf*cg0XX6T z3A)YxKf~lTcWl?e{Ce-p%;*xZGSzhWOVVs~LbiOhv=}wVQN%PFMR4Xtv0gS2_wZ7g>Ol8spAfHLmnIHVjq4 zz%S2P68fc*8(djo3od=v0+Ob~R=-og!-XO8%h>x~<)}yXa->gEdnCqGRuwDl6&I>r zbH8(_M=NUNKubqiWDjKOG{u_Za&vup_rDvwqb-@TK?{Jwyva51wS{rQ-si9?!oX>c z?MP2i#G1pyyL2xuljE2{>aC-lbJ0OX{5IYWu2A=*vdCH!%E1TXOT&~s2RnhFKEZa- zY(6Vl2YJk!J~pGj!7?U;t)QF7L47rAAE}X4#Vi3UAfup8vwIAQ3R+J|i<5M1Jt}$% z7xH{IeN)ad{qgd{r75Mp_dm}TA15eH<{(Y`BOMPef*a*Op6g?;yoMU!)8Uq_uB{Ua zW*%xf=_kob>ybc;{im2k&lRm2#XXT`e&h?R&NOv=-M<;|2;IOc$nr@4-k5i4S&vOY zY&zrYYF=Cj5#$&65|!`RjAGls19t`>r@#* zXbzcH(AwiX;|ZDqpWd77`6}nOrd9s#TUY`l+ZeZ=m5F4*Gi+w`;Q}#Z<0(q-J({O51X1gyASR#)YEG92bFgSS;R%F~ zY_;Ux$TNnR2ab6sy%MXGF+YZ@o3^SvuBe~xb-$c(2O%+@pblc!9?_l}g$}2Xt!E2r z|4BKcKbf#BajooNq{tIam}dIsGw{uxw%A~;_56KsvDL-$(Xypm(`2%Zv-1NU$tl{n zr_VDZaPtL+O8G{KougPRPrT8HHW!Jg{KJyOkArUTixiAMDUI4=kO1ck=!F z_hIiYlbX<0iMtPXzv$QBnU;CQ04PUj7|o~3f@_n%MU>_tt>%lYuu93GRH!b(`?JJx zt;if#_N3wu?>#1Yr;+l!fQM6A>S-nWCv3Q!s>FtP5*d!(z1(k&v zdGq6ua-br2G65p4ro4su5Lh{*;16M&RukjtjuTtoe3MweC24Y^D!aP}X%Z}#DXLr# zJ%<_VHtlxJ@PC|*Lw z;L^GPAC^Kv__u2Sq z%|bSa9^+|8&($Squ#=0Fmx4HDqUxJ-F-KtZva!H?C1 zC&>Mzp%OIZ@xkQdt~fj~I_b|hak-YL)Y|!2!ju1g7{5Tmvez%4hgRK?Itc0B57(>x zj+Rf;R5}D{nItt07KFw>1eW74{-wb`vcu`~(PLFy0R3yRgS-iUHyU{V^#wlD!(0kl zq?iUM=T^rq>;?5px`!|kNjxzYaxW3_!xl(8AXA4yTsvLi2X8&EedtZ(``MKCF z_6cdrJ2^aV#NOy@T+Lknw=f2M#D;W^JX{)&);uF^srmF}2_=H|fH}>kH(0k?arHxW z+BfCt&?q)))hv$ZQIe&uRPeuvTKF5Wo4U}m=cg>qM zrFu=x)_s!6>XIUv3S0j=* z63S@jw@`biydpw*Rdr(!%U@`S`F@fm^S~{4(bS9V+1g+NY!{{NluP{xUW8EgAQ?v( zwS%jLs7-RvY<@8x^dtQkqD2Sy?7VpNAIS;%)6Kqf(|^AaFxsEk+e)lvo>+A_F#p|f zN!b<7Y+-lvLsNi&lmljDLE}kJd4IPMKdv}>*!$Hz%?ZDuGDjKC<3sK+Y9>UGunR=# zW!<^YgvOh-Vxn|tQXNGSQ1#zkHXOzZ^r+ 
z%^!OdxM74S0T-pM8RfXlwBWhgKzVLYEc^sKQAhc7eEa^BV@GM4ho}$f0Hl*R7oG0L);+rE@NWIc^w_U_-`r}y0sxgqlh4wx11Ykk3oVB^&|0cLS| z2Jok;g?Io{sjfhf1~ITpP58 z+}a*r;)*GF-~Y7u*js9fY08M&93Qg?9|TVCZwl3-UvEc$wi{g}bVy%NMI2IRav50f8b?fhrmq|oSH&5utky$ zJJ4--9!7Jte=tCK@#tPe%zCXbUB8gGeb6DlKK|-5MQJ9he0xJWbmUU;Q}pB9{pR7Q zslY^~RjZT*{?~05kml%mzNTS>gf3|9in=97gvFOH3`PfFO-skPhb(V;k}jD_P64xU zQ_;;?6S~6S@(hUgS(Muy^Gkl9nS*7&i>nywa3#oy(|(%I&-g6C*{y|}b7M^jptRt- zE^k_NnIOvD@LJPx-T^~U`zs{~1tVlVNBS6GVg*S3O>B(r-Q!IfFqE8D6fk}gQ>3!R z3Z<7;YNHls2hlHz(g*`&*fB2n75m7)v``&-V~N$#W)(Uf%$efF9@OqQ2ZzG8g7%a`SpoXf;^K%BaCCvJ*q@@^QoOnD z%fO{&znaWj%JBex0sZ*amhQSKw?LXO#c$g=Ez*bCdIm3TpkmCsv?)}CFd8ziVrL8> z&LqWKwM$o(x$9wifh$9=VYi1&-1`3AkXuj3&dzSKV z5g2&WV!u|bxaZUpTIn3(l;xWoEC~T@jZrn()WMs9ephcY?mKjoQ1k5E#^rWV)$gB(F_p9Tug;(Qdszys5b@r+$!_{R zCe)}gbfF}mU>&%xOnC{*B{y$R^&FbZJ#xC^29#X=PM|mrwbRC1HPuv4jdPJowg~bL z1U`oe-ec^xQ;pkr#MJpKaC92QUxfzF{oHpBA0`n!BvmThq zT$wLBHEj?wNLp%Bjz~S8?!|5aVlM+j7wHWW2Lu zmxw>jGW6#h11mA(y$f6)+Z8yMfAPxAeqnCSsWExr4J64!SL1o5Dc`e>+YStqsq@*a zZQNo9S2B}xBh=1YoFakpT#cxhqcACW8|_}z1v}>wbLHtd%YtUg+R*wb_-|l_6f1_e zj$5Qo$JeFD5Y^L}v51FpBo}7`&nbYZ(*et3qo|hp@x$uO)z>xo0YR~*99Z+zq5`A} z2~j(vl!d5WP9m+YdrBZj#LCgc(=FXP@k{aJ1_6eBJ6yqJqvb~$Ox&iYB( zj_%3!TgkTdc2_r1+xt1{zZ|i-I-_#P_2p>v&&e7O-LOuudv>V~7j<-LZ9wjVV?}T4 ziJ++u!~*5U%hkWN(z<|!1F8yE-d7zJUrA+oL)u_rpm{7a-v+=TGo4x5_~4Ltv)9FM z+nS^!&w+C$F>C_Pb#8Vy9t0<$X&mHRpm0Sdce5Pk z-)rq`^BxrW>SF#j;oEo5zHOh0py?|JirQC8m^b?=j?W{SnZ%xgF6iGdC2NZRq>D-9X51t${n1`K{`?>6vmzrLq3X{;w5|Ev|nkLZkTG zqxd!RtoTXIbpPZf<+wi<{FT4FPVF!>U^8^FA+-gL_}aAwyHPtbX&dhDkIohM05A@5 z^KX#qGM$6tu4pwsW*>8=0GO^~krl9Dd-US%_+!-dm71Q|-B zpsLIxKrb47b_McGve6fQvi;IU;p1S~UbfAcrW2D!7K|Ir;WC1uq)>0tLT^yuM5p?57*Ns@UojsVRf z(~`o;(QG+C<5EWMNPgLFP%v7>JZ5o$tbc{288lDIQJzPSFm&acDRPef-wQ;?fytG- zN;1ZTH+}~kcGO1v$CFst_TqO!SVpiV@vg+A!ssd)*wSBn$&qiVl1gNBAgubyX8(j-EUgCUe`P z>;X?YNW*T#cl3`!K!QtQ-~@V~3MS0@jd$rDZIG5-)+#FjxDxdubxL^HS-V%TL_Ggo zSmDeou)?D;kBiR$roXm>`UOMJWyU4q=y*sDv}d<BD(F!{2(oQUs3yk;u%e@S$unMI3O|&re&iAM zg1n!C>&4gZC=rtVT(hx(6Y~n@+eJT2!RbkV|EF+iOf4c+ul5D_%qB*k%iZe)7r@Tr z{h4WBRDY!UNQ^MdYqibDBrAM!iNAs=nW9*#YwdTo`e7MI2XZC>&6EvJoQ-CWJV(&K z0NfBZohK^MHp4jFo40@u=@t;+3X;G5tLG>1se%#uA**)^Rc?T*W)Yq5>-YFWZEw%Y zv1?lm^xhGcN-nWqLu94dA(QWAG7DtorGJSah1#cB!J^dm;53l9B@dD zp!8LMcgg8SwtDBo8zh~>Sku?otrAOX+)-MC;+?@zB9r6Iw@TWq^us26w%5}1;Ed&j z$T)sbm^tBcEnoI}jHxuoW{HPaTs3T3N=twPG8seA#4jl-393C?JJ*wmA1T}9r_Sa% znh;s=_31D59Od}ttorst%c5O=TW+7p`il-F;n8-sxjjqo-{+yz02;9@WRU@xBiIt2 zkQWZPh>Vo4+$cuk;wtIAdJPj=XsQCmVIY|LO(}nqUj}k6yu&o#yl{cD-x+_5i(5Tg zzet(eAF3(;YlwU%@m-p#3$A777V+x^Sdg;+V?uK3pBlZ{0?gxRq<*j+#X5S$&rP#D zWJ~DO+IF-~iZdOI0Rz=j)X78|aFnkwvv8ZgVuK{EeWXg&6^Txz7kvMXiy;z4M5T(! 
zt0HRSKU&$@2%)yzR)`MNJRntu3R-c!o6T&Np%pncFZcK1J(I7wcM%8UFY9?6-?-)V znT95aLzl$ar4dZHgI@wC^@VP>J>gARNn=oW%q3L_TT)C`?$C@twhLAmdbrPo4Bw>j zYZcMdyQn1<>DpU$jXmH{J=NFZTohysx?j+sq|-UtmiKMcmH{D({JVQAZ$|rC9U9Bp z2~uk&pTldT=NApf`AZnQ6kX6fWcUYh6i5f2E6`*}9U{fvGH4}4vsq$HU%@iJzw2r6 z1uOHffcGWmcI|tFGO4i^zelu|D|B<`W4cZg9j;Yx_C0hOK5mV5tV+sb$JiTHxp1mW zG&zy?fmh+`+yAs|i;9#hug7=Uu0+R0QrH zwqC3-8h3^Qd)t~9b)Y(&5cR2RHxcKQw*s%Bzt2)Sg_^7>!1(2{m44wTT&L8(zAn?c zeLsGA;9cpTc}FS6s7JZ^o!j9B#Ew6@N?1wJ>cX-TTZff+;&|x@9mq50FH>bC8L3f@ zDrqhMzJlU&9U7Qy2bF8D=ChBz82*8{X$*TsQeSF7txInl-v2~e;Oi6f{E4BFy!jg-8-gBUiS9kI^>1OA)!jHUSL9^_7Wxa;*JK$m z4Cd(w?nKP9ewTL-d_$_ZZjiYxJOenXk`%ci$YTYdr;h~sYO%L9~c6RJ-9B6mB z{2anjCWbL_<)h3RYI_c@F9Go<1*#{Uf&Y0&$>+JE)OlqRhve~ZJ(1gvJ)!tGC}Hve z%*MIvt=lIboGBgu5rcs7{36=JhTQ4dE{vaJX;^XcW3BUFavl3RV}hRx-bd~`EEsu@ z{qdq{E%9cIBgth(i8%ovU_}y>>U*z>7;ux_G9DKu$d;Nyc%?n32@X4z)j=;pBYJ82 z2T~dhOkJ4t)FtaF|MKta&>}2%pt0EyW+DiC>9kmKf$aVCOn}aWXwWWaV&hBjtb;H4 z?qKhv^f%IHZqru^@4!<*Mk_`2hgdb@^S?ycyQxWdo|7a!D^+7O5Pf8q_u!?bC2GW- zlP6J)H)L65AKfaB-gt(S48=hgPqr#&>74II4Jp>6UZT{2e>XU0dN1bUN6CAqohe1@ zBgDfU5zmd1cE|hudJVOcV$gzAeV11!3K4CSVtndSb<`*=&H4jTIye>5Kncm?Tze+F z_Qb=D9w{^$k#Y9h!gKPTx)o2TOL+}wcF%$1akn(LX5?utDgUkFbJC=Ug|34a#UriP zTZqSOLxUE*8Ps^+@;1zIUq2-uJmv&?trRgrfim`f-&{LY&c$*wVrKcXO~R(gNebgj{0$4VkAV5*}Dztx6E`CTGQFJm8DI*9U#dzeWITkhvu79BLo;ad!1EO7@0(vQPb%U5 zuFC`^RTjTz)113|H!Mg6$+XB?wCjr^j2=PntXM9Q5K8vVrU=5?^AuAAI>5LiON`^ zC6fwIdZ5ohKrK7-VQVsfd8B#H$EAbFTo@zAceeHKV7&ZwsA^%wnAuA-$0hw0VAz4{ zu2A;tOrnro&16WHX8$DCMq1dB2s<0j8T+lVb5f)rJs%X6WflkL^gOejwH(#HKIo>= zf58GnWl9ZcuR5y^hrH}nrvs%*(_Ye*TA$Q@5^ef4w z7qQn1|4vZPDX#t=?;k1kV%(@mO`G;PJb>#rEeg$Gnj=1A7PY37VibU%?C75>xoR^fZQs56}89yXezu`X*GGWn{~8q`?5wfP5CT( zn0KTa$FfDFhZ>a8QqLoi=^cvK!3>#_t?;E>k*@ztJN)m4IUHU&&Abi#NQo6-^8Fz4 zJ7+9w1?}k-iRU}}^w(y*iG-X(?X%sN5o~w{um$}{UhZD69xnf259CBoU(_E1vW6;R zTaVBI{As*O-H|Kx<$Y%;IqjEk5F2r#zq)tzU=9@zQFbY>2bU>9no}bsuC*iW4c3OH z8NilJT?U4|KM^qD*1EBZ%^YefIphk*ycf1G5{^2ebT5YP@eTI7pAVa|2ZGK6C)FPV z^c!N#4Y{_}w6aWbI^ zsE0j;&;Q*}Y@>fmvrgt4c(Ni&KmM`wsn~7WvsPeWI7LlP{&&NS6qHPiN__gC2Zr+c z@vLn~Plm1m3oa{Ph?ZoxIgjq&7V~!YdsbQ5L5)>qBefNzVB351O*qu)%fm~#VCjD} z#+0RGQ>-%`g~oje?g=Ma2d0dJ@Sg_*b;@6AX8aV~LQzsiTrGOpJ-=xf3rh9Z=WS#K@ zv5^xMcHr3kvQhm~zg5qz)QZ@O-_gyaD{?U=;54Q;$?I!IYH{4+;}Xnc#z&=HoC&L7 z#s_uO+YvuC&Z9y9<}VD(S|Gp3QQ!EjO_)@M&Rb!7Y`;l=VPB5g5C5Tnd8NqSzf0?p ziom^p#wpPI6$iAjnAAnP{|M4g3zXZO(Y|QCFgKLQDhzvMFxrHk(}s|ln~xNHQJBkO zmK5mR!BA*4%ft7F3;L7)vOh*KO&ycxw1cQNhi{-o+cU36Sc>AhC8p^w{cv)8cO9{J zirTT|eo^T86@p5~qH}P*;2d#bRKRizqT+X%upGl=VM=|AW6!MlO)5+5c5z>txc@_= z!=pOxFHiVzs;dG&RdQ4X&e(ssH&sw^qUzp2G@k41S`#c7t}=pe9UX_Vtc&@HpcDAh zEQu44SL>ob%J`xz<-AjW{UNuiw%cC&v+FV8xY0WF8`uG53zWfS(J`Q;2QKq-AR?Ys zyjNY$*f`JJhOshBgx7hH@WJMJGns^GZ)O|wlzIxJCS+w2FAhV_=ZmY9ktm0+MEMo^ zS)C$kopIS%4rteXu32n7X655ozV-17|0QBFaYxj@8}@1XHP%D)trKARbl#I9Oh8se&HJQA_g z!9P`N%@|XL_H5%M7VNE}_3{aD(Y+drz_-mT(iuFC(SC zAXd8GSLlNvXyz3^x_sV;uDun5TK4C;{Rmjh#(d;T_gl!mda?21;H0Iw4ytb6?}8)T zc=3H7UP^)ABng1`fIds{rjyoc_sN{`&_`~K&jYzM>gnPC>~foI zY4IJk)L!Q~@NG4t50MnhxGRNlu1Or?ui*V~z1@QXDCyPbm)HN9JFebY*gN2a?HgA7}I6Q9?+9lY!yYMxp0<_0Wf zs7?d2WvfHOrJh|y;vZ@fHTy=Hy=&~pX28JHqsYD@yRL~_mm5+2MK{GQ(LTs5Y*`onw2(+G&u_5NlXF>fyaN#YVI|jc^ zXR8Y`CUv>rNXua&PYC2yMwsP@&EH@?8fYZ6W%((u1vj)~bGX=lXbr{UsC(G9ND z)<;ktsRe)EELm&G$(~@Qk3y<$Mp;8Y&B90LL2yAlG%_`UIlb(*o%zgGO*;NIxDvi+ zk9)aBgvim5+V^yC2`h3AE?jPk$;x5S)bCpyNhc$Xzhp>{-&PGgG9-3wxZ8}<`>*97^jw#X_tl;Xn!2M2a=Tu(HE4B9^m}>$cQ%EDIoug(U~xN=@C5p>ex;d2 zaJuK{6YBRZUI?JdVYNLw`QsOnG*1vY`lxjKexu0PW7>lY=dJIln`N5$sVLEQ(>p8= zfhF7-dC(EwFYci=F0g*L{C8cfu?6+(xu@pvoYkb0aal$O>Ai*Y&0@x9;JwsP7VA;J 
zHw!9wNS0B854MX$gVGNCsTrNA+I_BSW$U9E_XC(SO?RGIa1Robkv+g$;EHne=3oR6 zzwYhUTBpFU4$7|qb3+nm$Kz6l-{kYG(b>0ad~TpulC17LnEqE|8L-+j;^TFIyQ~Y* zBtj}QdFo4XC~@K$nyW@6i+#SNUG-FXV!}US^d_+G8eSqb?|>Cx1Gu!7W|^a8Wf;0q znCIQ7(bO#L63G&VQ*a~1U#-<4Q$LH>6Qr%HUvHXNOKja}On=$O%Db-1Gkf>WhuY?> zu+>}H6aAw@jZ?(YBOL|EgYpYQ30~)k3Fk(8`w;&?4O=c0oZQ#s`&?6=KpH0E-r|2o zF?6*3xZUDAbBIu8np-9{euN(1Eq|%Vc^T=~MYcQ2O37k6emLe;b;M+~R1>{=?hrN3xFt8M1!Q}i{fQ7HC4Hw(@_#XBhRy4O2PrLB=!f@dd#Vl!w-m^ z^)Aa?^-I1GJ4AWVcBvpZQ_kpFTS8@Id9?2|$NLjQ)Q%soiHFmoEKJU-AjE55Q2;7L zlM?lN%!-NQO0SCh_|Unnj^lJW!=U)>qb(_L$5Wr_>aurTm$g_Z9B#zOag4@%Kp+ z!F9y?Ua1Rh_~1lkS-xL$W^&8>z=dhaCumsDf;(-CbN7BbhG*Y-;_cT|;qRj#B;#B| zQjkjIp`D4leLt%SGme(PGtN0vVP%gSBld(n%rn@56~G1#klNQ z8%^QG6#W_PKB_WtI>gORb?2AMv&BgqsHj?lS0E>LWAwlu)hME~QE7y8DYZ;d0j2y= z*lG~E4ztDPig}pikqTb~I(~%$RHT2A@mKApcb_TFZ(#9c(Sw1Liu5U1V?b7`73T}+ zp-gC~>ZLT{CiAbK%G&6%_~hj3YR$V)KJ5*DTVys88P9MZlh|eJic=kxpd8Wv4}QFT zn`iH^&j#Ycwnl;A4e3&q8_J~5*>PG3pQH$nO;YxSujTm)nVANjK-te!+2aNSu#^*)rK&ZdH4zU1WQlhDNqROw&zo~C49<|x+}Xa43Kr~C{) zKZJ5x^SD8W{galJU=UqRIQV7&AbJMFPYT-sP{odj2jJ&Nl|DUd6?JwuQ{Yh{b$xcZu)&vtN<}danul2^y`WdySOfOOXIrDV7Vy0Z1M)5#oxE% z?bc>|8?>Ts40#7X^R!Ru`tLfd&G7imF1?X~93^Z=u!&y{vni3WQOx>K9$Eq&&eJ&g zpdE%kAc}W$wZ35B@t_2`n3h^TOIy00hKyFRY~g38P_fGk0gd&ES%#+^ztRB#6WYk% zG>4`{@|{_ml555jfo#n)HM?Fg3t3-NnNjowD+MIc|6*RrH2MDzD`Fafk) z={+be6c(vGfoP-7`klrDe_suej3x2^psNNQczs4QN`|iq3OF*TiCw7R31V9jN^1nV zeUjK$3K#tXvn9#>ph(&eJlmh8naZNH2q zJM0?33Ym_qCI;)XiT2~CB|DG%zPVBj5}(l0=cOSG_7%^z(T}AU#-SpY-c6b8UOwNx z?H(n)&<3p;Fzl>F@9bwdurlmq1_ac!6G zXI~_T{C2iu?$sXb|c99{Lc z6ztEvK(ukwr0?W&VGGx@P+j0g?gYbW1mYecZUp)3!F}wxzTdJT`W`)ROG-^f^GM8 zF)H$-&gJGhVg34}ELm@j(qg^lP5#Elf*Ax(tvU*Cd zqo9{>Qa9|2oU(54B{<@ORiH09sMh#s{_$mZv-g6rKeWX}jgzQ3^zWmy`a$y6kQDND*azNdy_>5U8ZRsnSnUuBQcnSdJMJ`%zIHcOHeJhd~WBVosxJ{gqoK3tCC z8OzHkXv)17*b@rd;_q`u(+jC`x3-$(9hI z$qAQ1m#NwNm1#5W(~~TXND=oH`hZ`zV`P3uBx7geG7gcCWKq$2>MLT4>}5MHey(6% zHGD-}fySGc#j`;-SEswFRCYl9(tmW-suYU^e4tmlLg3-k;Rx z#QRF}DzSB_O-`A$d>bH?32TGuV(e>@e578qkJpMr^YMwsK!baALIF!XW(T3`B?S5W zyUv7{ovudib>tiyl)chM@cN{uhGlVz3{813R_xyi&*`b26I-nLNAqU=>QR|=d=oRF zDHG}8X*%T*o_HKhh_mKVp5gQt73gUsY$yKD2DpP&4eK>C*_a28)BlEt5|=Rujyv31Pt zJVWeFK-*a#{21Dj{20oEmApA*6iLrnnAFTap0rLGn^6bG>!q6(w+j`3;T%;_izNRU zTG0Obfu0yqzt0_xwL-L!1S&)$Vhw-sPP}wCukB^;m=d-^h9;-y`P-+Ff;nc z$cQp{hsT#?O`cLYF+V_Jp996$@1`@}JDW{>3HtR)E)vT(M^S#1tS=Y%j|C-j0|~Pf zn1VNzHk2EdB69(<%AJ7k)2$6`8tE;G$LxymvkO}yzP?!k53)N|uXyC0izNTvt1A>N z!DOTr9T`-C1~jt{O7$$$&I77hmBKri^&r>0lfSLTZe)=4(4(9Wq(n~?j%R6~ZZ$g{ zx#4|v3edIUoi)c{daR4&2)yHbGQ&J{u>D$TK4`>iXuPPi4E$-47d6yFagVzkl`HC*maL z|HMLe3g&kQLVbQd;e&P+YxcaSqWvZ@D&bIJqDC5YN#!1Hy|-t|@CN_aOzCiayMluM zTdjH|K3ZT9m)@&@UG>PcOJ=tg?V$k~C(4^+Dt(bEfh^jHLTb<($4|9>BJ_l&Bf>PR zf$K{*A0sWQYMMz4)vm=GW?pI7M^3p;Kp!%?!RDYP&w+29$fHDYOwSE+!0+>yT9OOy2rA+zarhK{h+;?` zwS}$FxC#X}M7k5t5dvlA#0>BA5aXy=HBvE~?MuZnm-?w1T?OLz76nJ8{|+#(XR&ZU zv8*Fa;a$wA&OZ1G_g>AnCwu5+XPJfx^s}v)RD0`_?4_uG*ZBsRzM?zKSwB@>0>!Sk zp&f`H6={?F3@gl@X^jICIg_8GMHEKT`+^duGVMnrqV^N8fc@W@WZN4#K>ENQvMDin zHx+xQ%T*`73Q``^YP{46Z@C(hdS+QxkKbuks+afDxo7!+x;aue*pEi9AD=6pOEL28 ze0ud!*pPYKvH9%nntzHcP?L>m5g|?4NMNv&HOtJ3XFH5viT*cPj6Tkq1c_I&0e5w% zqP1*f{^U=#K~QyzEnZ@;`T;=H?qLX51xn<%No9e$*YPUlA*^4v!(O#<`pv<&VIRU; z)lLH?8jUtUYp{dzmPEIBx~ zSNz2m%z;!YNwNW`Qk}1r0_T!r{gC}(D?=YkzWG3P^>Afg@Tq)Qu3&urBtS=1i#Z#gPlUfP9M z{F=YIv$M^&!RMo<>ntBZlrZgLv?74lHsb*9EQi&EJbi|rWTjIokS9VD$DGTt)O#^u zdkcgPU5J&Gu-g0@A;H7r2||P+%2~mYxxsFYx3aZ##rSL1+&J(v?{}N3O zX!kBSB=i)aEod)@JSA!7&V$L8&(_q&ie=IMR-f}+RW)Y`xZ~X7bjhC`iE+JLu&shW z(GDVXYcaplA+{ecYYfg8@j(w6=Z&@D7BFR{flVZE!`SqAb;L31z*V?F1c|)s%crgH 
zA7uan{#En;pLi*op&>3cv3mkDos2lPyPli}dT3mVx9{*UC9c3|e6Fl6$qu`LGQ|bvO|6 z%s4NUGgTcce}N4(4{E!l-0E`R|LT%?CV?EDdcT;WKIoSlP}!RyMctq{=Ugf+(u!47 z5p%O8s)4w%J1(lkpW2M!&^+#ynC%JfoeJnZkpLGmYs)Da|E}9@_gx;U8~WKao_-Z~ zW?AdQ3q&s-nZTmkpugA~ryCtT8;I`6G-1aaP?I;+CnKyuTkvm~fMU_KS+{6|+Bjzv zsT=H@)uy#q((0o%GuXaqQXf%AaruHGICMdq9r-F6u~aSe(?GUmZ7A3<`Q%}J=Lwkd z(UG07?E#);E^%g~U$JsewK*U`632HAToaGy#nc2S4*}czTs>EI&A++DN9RAXBu!i8H()w8r z{Qp*Rtc!hLKy>}B>n6&;L{tr4SAlM0>R~`&3}wSN$=uLMXaDWT2hjy2Z!5|(3*1Ot z(#J70U=fAZ;$fmO$#vY)b;pV2j*(Ba0yf%IPT4 zWExsU1W-LFk&J=j(-M2C!gq0}a|d+l|yP3LxtPK-P1PN zny8X!{rm$JO*l$|JDY*D$j$TUpFb4$Bsz*PLLzuL4~;`ZcX&Fo0{^(b)1yQAz6&yR zWm_o!kCBXeVr5YiS&MJhr8`m4fzdo;2tM+V4 z@-QYgEoEp*Rp%qnPLspFnZ=&`J5hazDCB6=U{Z$g2xyRH3bf`52IsPmqNj4KrGFna z9<9zq#3cQNA|K4S!6o!6NvpNLwfE`eb*RhQZ|8J|y#cJ5|5jHc&N$GD{Xi18m0XlB zTxp1x9vs&kIei|}Lj#Hrpg1~wy8IX8*PqA+k3nZh!MXVUoA0}4Umnce{q>9JjrTKu z*c~k8N;EmqA7mv*62p4Y=K?+&9Z2gR2WRDqVl|ySlDh}Qg75S>3sloL_)?#E^6<#N z>*C@#5$6J4oQ!?k+Ftijea*fZr;!Gx-kYkb$h}e|_jergR`r2JJLLe%%lOURh86ry zRunUY4`}l6L3}RRyt@LW$Pxr-k;rL{l*^T(erm4Of(Kno$ZZRRv(QeU8J0l zCf8ot9B_lN2Y*aiDEaXnv_XGygT!U=b{-ExI9N?vyYEzRc&ERrnIRiplghx?@9wj? z;(RhKf9eb7o9|Aqg{9$-%zcm@m{Xbx$i6X!?bO+)aa@}^OC~j?+|Sm(&&+6tmpAVii`>G>gH*KWk=qEXzXng z9am3yyTNL6x0!fpte4+GZH(DgvqFau@ptxQpIz((p^u z-jq+@8_bKvur7+O=ygkK{FQ&#xo6}1nPP(v#|;#ZHc#&8V~ck?7ZzLzNx~9Oi?V`g z)K6buVeKz?IiV3PATc_5_P+YZ3C)&q0m{hZeb}4A#KJ>^Hp4U|ZVzqfh1&{`{GUpm zRv4EUq(h@K_%hvg;JZ`PH$LoJ=Hbb(r(HFW15ZP<4ots$_2T0RT5hsW;J|m1Eo+#B z*&{K{p49vjesYX|gU*}zeJ{~r&Jg6(Y`CvAo}+IavpF{es#ni5fiwD1kc~LBaJ%A= z{d>V1Zqs3t5Dl*lNZunC$ywz+eI}I$-%jrefmW%_o~zG)3Z)Lx+u-s{DR_I2Q}#&K z7f6#ovig9Y_Z$8{m>6JFZ+;uAxVWtMP(MFJIoh~Op*m{sxuAoC>d)A_Z=QK| z4sE#mN`{-1n}ak$8%bOFSSAW3XSN0fO7>$2*bLjB0b?7#;HP~eON2R&Q%n`q#5kEO zNmv-#D-AIok1c69)bm$7^RaXx-tFKl5tQY$TDyE&iqB3PWu-JYsrvw`{-2s0bUCc1 zl@#P@aom$=c?YQHxE{#tEs=Hj+kjv@?Nh6E4u);pV+LsSoE`Ie(I#;(J=aD;5aKt9xdM58waJNC7 zJ!wvETdhP=kMynrXZYVDR&-L^JaP^?Esg7u+Q*GPhQxDP>mYb0qA2vshlph&ZN_eX za(a;sKG0pP;$URZZ$|qY&yH*op1lF|nVwVSO$`os$0tDV){HozeFxFivcxT;dF|9t z<@3n3?a1!I;oxN!0^6p5hUGTOp~N@xJ_}uXQQ4!b^kamLg>dRP3fg z)n2`am3yvq_PsB!HsASMK6}hxX~AYD%zv{#Gx}(i6%e4z11LY5p6+GBpo5Y7rl>h~ z_|QwMg|xV3A&NR2Iq-8!CiF|~Wxuq(mO{8zj!qwj{ z(ecLYK3n!`0R}IACZOZzc|i1&aRPS^O^CdVJDP+AhM9#l73pU404eFZ(sXb@Kid)! zf4Z`vZA|S~VRRL9_B_BkbWg-D^BXtc)raNdX0Y4DqAOUJ_G2bJ^6Om0@h?4J$1&+a zTyfqX$#xB?Edjkxr%&5JxOa~k3%8Yc8ZmA)TU*(+?EyMR zD$i}yy>y9s2IjQXiLiBtwn8uTH{$AhjPG}5@xDrAkN#HA3f=jNsrh{{3}vh$6idn>DScDL43c-Pf~}A?viY{|&ChziLd2q0?1; zL79HJ?d?yM{nmP}C-Q@^S722P2aJj0ESP*3Y}@r@&pXDwmw^;V&P*ps2W7>YVi1hK z=W{Kp^pjx7LQY@vE>=c@0rfwWYG>$F22rO8xobEN%RyUzw#grid4QD$tzb*L!3w&sh zN5OQ1P88~IoHYLHRit0X$C{5#HLtr=bPRRg=Wbb=BD;ts`QwAcXkZ(JD*l{)sCCM> zse$xUx*Xa>M%?8$Y{>Xij8OmT(Y3MaG&8tK@}Kfy)e$OABuC1)?{GGrUP$)hOPM19 zM4mJ;*Fz`Db{TZ1;-k?IXgfTbkoc`aa0-!qel5=BUXLRw50nwEBU51>izR$b-k+l@ z2GaIr*6nc@YqeBsASZm2hfhh2!Q5brF_G*dO~W>Qb978^aJ7ES(hh(^WyeB&=!nBN zz=-;jz8)WccAj&eq6x?GCmk)L!!4l|kr7R+{RuH6FUHV3sZVBSwdZwlb8Qz8R@d?E z@Qqpn?tq_QwUm~~)vPIswd|3uZyLvK_hdUmjvM%$da4s_aa3KNhrA?NKhN$m{;9>~ zpD{4cqRacm$?57NhCa3btetxHgjP?C;zDjqtux0Vg!VN{UP^r%D>>wVeH&d}&!v*) z-b9z<$?YT1#Gv5iAd5^3AA`6K>^&Fl#_XwEgSZ|1@WMhOGs|+WSu;a{MeL)R#yM_3 z1roDQ(tpm7UO=_nbh-+EIGJuLu;ST?gXJJPYVW2F(&s? zb*$Gop24LMM^(WamGQFR21iaTDVX&{nda32FRAHkp!u=;jG!@CO{M z`^h&>*SSX!y_vbsD|!mE>{Jotzc`M8yz^IP+GU!UmG(u=uCIGOuXBLj2gIqME{hy> zD^Ws!0HyK!Q+J1iAbkn99&#Bn(bDi^{5uEjR=~?p78A>Zkcq-A^r;N*&(!3D-oB`> zA)}uta>9((dri9Cu4L-JFh|59jC5kY$! 
z!^x~pN?-l0e^~yAtr@DeiQM67B)J;2FI>m{^7)x zAv1oEj^szhUwPiHVY=w=trW3f4PGOBS-nJhx z2ZBt*i`za#-Px2G8X>>fk|^h3cBtHv&N`b+FR-@lSn3@F55%7&)gFEacvoqM#-!aM z(bD}@&eu`;(*Zla6=WU^KTYR9ED`OadO{b1>n=$xFFiE2^kcZ!puFsT*Jge``6S}B zX?#~+9F}2-W*SCkB+Bs*UuY@;XAc59#)HsT3VmLwEOgAqr4S!7f3pIIx*R}LIgU%c ziQ2{`C8-0vIr&>HR`Gix5IPOqXSCaQl9xyt0qC07Zx0{~l9%&>W`m{BZE@C%{SPk* zg_ zBA@l{2Q*>6+i8fTuz_+B?q&P)?1;$l18i(j8Hv0zyPSX)yvQ|l5@=a4235DKL{=BY z@^*GTRukiu-4Pvr`~Gw2A-+PpnrPsfUQz!yVx&pQjF-a;x`F=9{2y|reur@wdZkc_ zd~TCweK_9@8;Nsm_y&QT4|WsapZU)sCY8(Co8|?vGp9wMih=`$W`~tr z$?OEB^WSw}X1nB&-HmRQVl}(@;%7raAh{VdL_6@P@Ciy6%rGgjE1i<$5%gxzk4NX7 zuCMEi*Upo1IqGPjPy%xne-qVueNr3ug9iya8URclS+HfQ(nAvWHpo(;KlRrHdg~l~ zD>bug!HJcr>hz(!=(GLh!{bs}_J_EZ&El+IUmp{c5B3VjF{JQmXge`;*Vg`PEzIKa893-9UkEbPRJMdw&Hab_ z&Osjzf7qLDL&7kHKIo&Qw8v$B@bn=C^8S&DY5Vlz!sQC)dySQXrSeQNn?$r@T{|34 z*4!MoY4Gv$cbJPd)D%z%x11>3Ba0C-`Gac+=v5wMWXEhO$PlJDDm=vSE`~)@S~t;X zwbA~RyQ6f4>x1nBCJMdpgH+s=mV`X~Q;GmJ-X{z=C`Hs__vv1%<&!08$Uio+3wIMi~b*O161jPqd2LYuL;Aph_aKoEb;kM5tt4oCd zcvxWc#XE@)?fVKjsFnr2NwkbK2fc^0qz$(?LhoW+bX?*p%k&6}LIF3habM({8lB69 z$0lMzcF&B^sV^E~2ZvEX;}SCQ@rZPI1No%q=9d|%{2=nQ5HdP6FIr?bBZU-OiGB7L zA@{3tA#$h!d-gCk*y_^7zFNPq?M$TM;(Suc2}(`bD{uo|Hsah~iF3v;mG+p- z#O{54O+pRn6U9M43ljIac$eHDBBL||R39;e<>IyD!lQ(-D7E zc-)Odf}=G;Vf@!W$(`ru6zFOJ62Ipa&QVwz>KE5Z$m;amwb&{S{#RHtfKEi+iZn6X z{fsA}W*8@G4NQ2k?1^RGR#UAqD@Vd_TQc*O&%L(ltRcHE8)n>-MUZ6ujHlrf({30C z*X6F52WwprGTbtmC_tSazYr;IQgg~@Do4~BRY_gcVhi^sW_}>o*z}l`b44DuTa(vY zb~w)ce9sWYD)@nBRDfA1GPNS$fPlCYThZ9KXSjbP@NBOO71?8WDdQ1sbau6`GB*(~ zlMRv7qj#P8V>MW>^M`VHC!@(PrEyQaPn>|wI(dE0X^_{~oU+HUnZ})dDD3b4wGRj<3 zbEbog!w6pdR#)-xmx)8#zEY+B?m^j}mi@-mf7h*>kNR5TRs=Gv7x6#c(D(Qi+Ch>I zKZ=-_^mAts#jSW5z7X}!YPIB=RphqxIq`tZ_p{exVu{t6O#gn*{wlS%VC^x1@@VxN z!#c;xnNI}$CPm{80+8F;-{^JCKtaZ9-2RBm30c}*|E_zVfhY%f;;!ZU zR@KFoa7YLrK``1;uS(2B?dQuwc;7uBqj+8Q?~XDZ=K*PbWOR+KUtX;2{MyBhedP~Q z{Q8&}5fAlo^ zl^7Lr!sCojJh?Wfima~DJ1u5*3gK;aW?!r@oE#Z&JNdL;z989^E{TKfdY|#@-<2Yn zx|*}hX>FNj+ds)E2U_sf9iaSq$XRcKp5~5U>6-yqNi!-cdq{#HlGJ@C`(B=Neos|h z#DN{ykGKO@S<^bu4eb%N{-f$;zvm=3Dh|;^6##I^OciO3)lisT&{a>}{&d0kIkvX; z>{v5iRJ<1U_GAIwti4F>HtqCGe)PFWYYD8O6sI9)>$sZ8(Uh;SgK>ed*}BD1WYDzG$D%_ZMpQOg2S_a={-#LiaG4BVB zZMS_fPmlgBaE@wwgl{m-HdUY}n$bE6{O_MuD95y)LeLh>|8JV9OUe_Tw0gOAip~1D z+Wg%CP}2scjeATT{vX4LtBo{X#9sH;=a}|x)0AHpZd+9)qGB@-)Gwtoo2bisLmtV# zyg3aW>K2agn)vnTAZDx%tn2oRA2}Fbd(*P8{JF{U^HJVe+=K1}k)wuK{Hnq8G^js| zQYC0WkvIwg6E7AvPs188{Fd(>E};xJfP`PCuZ5c4eD~?`{QJBs9CA%mG}k7J= zF0#agseIL##^HD*F)e6Q4&o199q{9H8%t2%B`h&27;bPBnz9IXn(GyJ95ikW|8&9< zMZJjuDtgmIYPT9sMyM((4@86^Iu8tgYk{U^1)jV76gE3b5u8=3mDJEyp@w}@3ZBFOT+2f{xyRn^jm$MIMjg@^q3X9h@h!n z{de64|7QG)?&T$|fzn<1C}vO1OfN2{Fl2k%*ITa!Rc7ePA(hSjqAx-eTg!>p8d6nP z4o*XY&C6qVIn>*eo|}Z^MS`Vp5J7C#P9Y~f9Cp8P{h_M(%`3ieCtf%m)xFKV)&PZl zQpUyY3y+vEP92!1AFWRQgqveFag^~A!>%2`tp>nRe4P$y%Q*M3WzH$Uf#JSUzldi^G#z5E7#)Y_wv(b$c`07{@852=%5n@UBfN zY9S-D_+JY*jU%+D5TrjpAHBLISlQIF5PG$ktiA+|UiBWtK+(ES9gntO+iX~#4XlV- zpiO))Vw}L78c_SzB+=c)tDGe=Ca3eKVf{^g$Qd=WnFq3Ncg7L-L2TV9zO8iu4M%}i zPG9wDIFF}jV$nzaBfa!&k;yroO$-38OUzL_G=le*g1%^Q)T&5XDBXN7-SY7Nb^} zKX=$^_|5l%hY1&)g^{iVfuYtDR303NXn<}}9&K7If-J2`!*M7#MDrn|F(PTvk? 
zTCunE#6?87sKu!HR-&tt*@)PO8@&ztqW$kFZiV0Z!tck1w21u~$BukCm&sYi3*V;4 zPqoc`_)>@m2g0eum9XLP`?aNYh4mjQe-u=ti~|0g3#wPhvRLkmFVs7*EaZYW4VeAC z{hbVZsO-fTGYlyi7m-Tn7<_yXr}Uw7{@dsYIJNe$;If61V=Z`#h?7xZc8vG+aX@Pd zm`-Yx#6UhJ??8e8-?7T2TC?!yh^01iqaTB*VL+_S-tqr;WCHacRB=~hjDZb>pUaQ4 zAmAq~5j&DLc;4}kPsQayI0zYXjo9X#yZb`b)Qs^R?QShX;&HMBYQNSH0u=1+&v4n1 z!sH+4s&Zz#zhl4=<4w2Th+Yhuws=xzEHR`>)o!>KjaEPE7~m=gYZIDw=Qr9Yww7q$ z?=*rM7WnZyo;rsVeEN5t{(P*$|Dh~iVyO;bmYoo7LmF+DEROFN^PYVT^ZhhS_a`NB z1)2T{LkNX#NVsE=hptiB_5Dgrmx_frq+YL(#7%&V^r32C40G<&jrFxe2Zt&_7Eahft(aC;&w^hfEH}TaMffyLu{zPVec6@pUgMuqe}eOL`Y^Wi#^B`-eCAAK z|8T?cs6Vl=>-k(-6LS-pMt;%2b=<98^&o{Gu1><;TE1D`Oo#su+aHrRJL6n8{X4#{ zoQ*g(Us6Bm-TSB|T%XBA-JgHI|`pYwf_Wf2rcRAxM|D37y0V_{|qD7i^Cyo==Jw-zxtg z*(PANVGfBC-{N(h^(B|R6mUs>`@q>a$DV8PW_`^9vCkF42tqn&j6 zIYjQ+bQ~CROk^FJXV(;s6RVQ6l*{iW>xoA$(%qXA@%2rO*+5h`1>j0{LnC9m`g98t zmFSB7+r_N!Ajsjwf3qLS${u>vB5_`PSh2yp%Y*x=<;I;523d9q4g^H_gi10=`J&NOh=Xd;tYsxU+z#oTKG{zGJHR$ zM#XP=F&yWQRox$JM&4`5v8L*oE`W|MTC?8IJpZJ-<7SO{$aAWF#f|J85jVeS51fg4 z#+DZ}g;=9ZHZy9zwSU>$QGCHsKaYEIR+ax1-{6>vqQW+hhdV^nA@55uGazsUpA3W) zE9*>Cy+ z>6VF20Utgt)*O*~=&^KU-@H0?m6jD--LtO^;y}6scb!^q{3RBx^kp^&_>|uH+C?y8Uad5O-O?tK z{flvuZzqjM1h1vJC~hSF#McxVUN;u^LrWuH=7=+BhFTf=I$$c}<9y_yXxH(K6GFc^ zj{j}=srYz<*=x1@s{<&C<;Y+(GM!^br{=Ij$uIk!bOp>Dwn{6wq2grQWt;3vUmkj# zqg_<5KHW1N>Mks7-V*#o8~e2|gv?guX9k;o1Mrhe)=@G)35>^U`gRi*o1qz4Tmb&! zbraR{YLhbY;cB#}(5{xqtHxbF*JCX4{&qtn>rnMKM|e!0GQcgCndoVd^(lcdDp{F9K+BiuBfMI#u^<>4@~m@Zimr)&=7NMM|11IrAiQ z*rmFD`9|(x#$eDV@wq8Otb;&IEW%C0)336cc*Z?`tBBN-Q!W+c+JsaX0TnN}f}J}* zJGs`o=W*EA1jc_E#h&(}?dT$g-*m;u&0sKL*$n^w@_erE<3tp334S7Z?-1Bxs=ds5 zg4XNt1kg*Bmq)I|H)kt?jYk>#&Ef+&tBAkwSoC%*uL@zM)}tmn`^JF9K;JGU=^IKv zK+C$y+xLny6>8rYWA;tG!DU|Q?Z;<1t{Ij7qsU$El91^%h~J_eemAx$ba06{lZrTu z=TXdFf3~)=)(;!M!IM{yDvB0G2jU!AK+=LAuWFxa%&M9F>GkW`us4dZjxEC%wF%pB zaEglCjo?J; z^+YfQl`O*p#0?IE^rGO9nP@~4za^C7VPE*!<|&4lIDl)eF>*=wy_0xB6qU~FGwcNjT;3>r1IT+tjje+US`G6m7Rplv`0 zx~BLG|Bv?Ls8#l%KKJ4V#+7t~06`B$Wk(1(qb)R$sK0@t$$ z;uF|MYl0mU@UF#aD`V?yH1p~ONar1kp&I?1Ri&NDCfq=Y`lSSsPSlIWSXT!`{?W!p z*)h$l@zkKhssIe8MG=PFk}ox}WEMsc(&D@=fbM70m-|e{}d2Hf? z)k7-oR6JOrWN7yG_ zoI+;)?z}*>GM)-i7U6k_DGEuuPvP3uRM@t=<<#^k2d4+ucz4iya~qJd2l!+iTYM|1 z8;7SxV8isSz6YrPqzF2Z8L%rb%KHCtbmaj_=3TdWXPTzWoEEdJNU?jd!v% zGjp=saAC?_M8?c55oRhgD`nEul#(&G5|LbhRD^QPJr`U+MKpI&5CxQFzF*(}nUBwM zfA^kq?m4Yc*|v`?;U6ANTB2upJ9|&BdojD-hW3tMdeaJB!J^M#!+;*<{vQepk;jr) z=*FTK%a3$_5OqZRpT(|c;@Hb*DV!F)GJ#@-HUH?VbgIl`>X(T?(MJI>f z_^l=h&;xY}H?ip-;&&cd4)e05{!?x5X+_XDu_rDVt>YI;iA&KIxYQl?My3r|@13&Q z!|xM2*N#9G)hccIp-c2f$f&Q4Uz@LoX(v7!auJ-II%FHqC<>G4{#kS_;|2cVCsCdr zHS}ET7-A@Dij=6CYhII3m$(Ra6gYZg*Ho=B`0kpj=djxLMX?7nt`vPghGt=o)$c1{ z)S+vRhg>5$dQEE2>yg7I=wO#7i?#j*-u|-!6X-#4L{X+FI{7hMoRdeKzmXW|8U8TR^ zO$&Xm9L6dmV6VFOZ0gk0$;}rP1fbG=#A;f8 z)L?sDz|u1GzZLJr#kef9F~m^}WVYFn0*tMY`<}y9bHFapp3)Ze_<~w}{3yasuo`A?4rs^0hV8%Bvrkp9$*fp>H zV*Hq@&k2@i5%`*h>oB7rWkm&zrJnWF2jPZP3B3cjnIJ5&y{!GkG7Y({jMUO&-Z5_k zH?(1mk5a{6e%_~p{C6BF#DA%XA9%kJbc|uyx@M~b;)S3-3O{9mu^F^_rqRwQI1nO@ zMFFS~Zxkr=`=R#3qb`*Leg+7`eo~e+X|K1`mt7tC@IgH04W>qq5}EDfA%U|W>a(b4 zRiF~!!ayMDfx~G(<5wwPRR8OaI_+*=4^LO22bNqf-&NjeNPM;NE!;EFW6tAay$-io zN4+;fkP+pB0&>okT*XxF;2yCjJ;C-i9uT1)NdW)YE}lf9cVo<|`q6sI>Ab>rITl5t zF=0&@>$ibd%3R(F#|ua=qgd4aSVt^d2Qs*ZPldDarX=_3+*WU11Q*7x{{NFRZD`)& z6ZC+W;typm=7h+ydsbla_lU!f_4?yuk@P4t%u{(noJE^R`uOfJ>iw#(iLXJ)m~R)@ zm26e)#!voWIRxNGO>9rpozt;?-ql2*CZ#0|N1J~Be6!vXHxl2Z4}IjQBc7lo8gB+f z%2P;n1iReT?n9o~NyRFt3`hJ&L)-@~h`%S6{>_|6mi8IzAcGUoIWYpT7e+~rl*ADr z?>wF%DoJ}g*SGI&P@PkC4X|R7@ASB*4lTEINI^nR|A#Nv0|HhJ5tFmvVq{mX_4>W_ zjBf>Ig0POtZ#kwCPKOLP>GV3Gao)Vl-&^j!N<9>f&OHxHVS#5FR?fgB>@|?RI~|3! 
zzeeTveW88E-T$yi-i3_U;2@$C+>3(0J2ukmeJ4V!HG=?hua&)V_93hD)TEf9l8}pQ z?>~GPkh+`{?I#4?y6c4M@k=Q4Y!1Hb+aF4~&CYl7Qe&94${HJKoT57C9jLGw;H=iP zo`21H=o~U$k!iB6M{3Yj{LZEUlhm@j8@Z%ALHSqGlGa>iya z$euMZ!G${0x~J|sLF9;WN**k1<>6*)8_&!FYg$dS7V zDHK$@<;m4vOc77v)*lAf#{4F<0l;W^xUPwVR`#AYDe(S@0Hi=z?R!|AEyCiXYUL$G z2$ekPRY5W5lUp`XlY&*6OBh9LcUCz=9=p-{-0Z%y@}I8LU91lDMlcP8DC0;-Naack zA=R)mM`7{KH{Rnpz&%~LIXQp>id`NWpl7>tiW)nG{2;f?faDoBUutuXLpI+gO^34n z%;I=gmtAx0=8Fy-=w8ABy_Cd6QOOf#EcUtsJNnV;^&1hKsp`6e6g`*M|2&X?QFQt0 z0jW#uQ-1xa!kn)z_27PApyU<6BTMhOl^U29U0k{Aec<$(9RJ=AL;j<0F!qU_xIBMD ziu#tzbro5}rB)N;{{fTcin|}8k(<^CCk!X*7oHZXhtwwxBaf&3J@gLt=TZZIgXH*m@@}Ftdq?$?uiN8K0gYS$9ib04bXzjw^5DgrVv4$Ne7Gt~ z(a9d3qaVt#-xQzOBZ;ixyW^?9*{C%Fd*B7bh#`$>k5Qu9T7r#6gJmL>&hyO1Da8m) z#lgOrK?bU5}n=i@nSJ zKpr2&nEfpy25eo`vghv=Mj2PV3#_Kjm-*hAJW_N;tmYaaFiiTk(9yc)we7+ZNG%fbQnFMo z&&nm3(w@4a1csF{TqP1RkN2mB{rnnM88TN{nB_Jcr#CMcT2W9x$^sc3QOR=ZfDF43 z!VOH+xpm;m*=bv?lN7ACPF=~EU39Usy_pTytcfS8JWJZ#70@{1t<@L|J85!}qIcd! zL>CTy?w7{n2T7BwMJ)ZTpsbTd!ti_a7YHN2|eVUvi_U ztLj$32C2SJ&&3GqvK~D##fmqPq-i#?<9Gg-IaDvx&F_y6C8Jp*H{o1abV12e>&X2C zLq%T1!ZfC*`rO@(t03lCa8J5K=X=z$i$yQKP)2TT{QjIZUEuDOcrfb7=&vb_w<=Rs zVfXaEVD}31a>{%?W$vHY*dA&oi3#NKXF?JG!D@oR>r7yjb{J=x^*s16&d|^tuvjBV zEB=wRoc#G%ENB4`M%VE#dsLK&g^dTze!s9}|89|AR=+nBRMT4)L4z;j)^X+nE^?j~ z{V>0sc?#0~@-#&m*u*Az%Wl@s;}9l!O5&O~IZCx7zIfQVHlX5*V7VkiUg-#W;wkn# zPv~ysxbAFsk*Co~lfPyO3Oqm^`{7gHGWHm@53&bqVi|#V=HA+us<6G<=Ku#zx+&0r zTCkg&_}s^P#U4Spvylo9KEu3zz};*jrEz!5Q%wlpfsZr1m!~E-F5^I&P}`ajk>qG- zy-JLt0QP92&`(S5kW!7-yg;mD^m&ba6M5<=tmxdgp>08O>yEdo6``U7fm02`1L=>M zTV;Mqpm;GV3!MRxtzAA|cbSx=5_iZ>mqL*aaZv1nf;|fBU9Yp&_?Vz8{8qKJi{Qzw zqn`nxj>sd+aE?wk#zz3u4C>ts=8`GLM%=Z1lpqmMp0u`{TMC4KUi;-h9Dn{pNXJY|2C*-YOLuaakpz+EPR}fk&yemCwyo~fS=h}#06JE*+%zByo=Ajz+{AW- z_=~juhrJ0g{Y+li0+)Twy^JJuEMWQHO>?6+a9m}Wj#Kyp`iKGS> zX{s1qW{S=prC|LpQ)~c_nNy)^_h_8)YB4AT@0O5wBO}1mN2?`8FMUS&tltVIXZt%> z=1$rt8RgKg`){3YN(sKDcDWB%$|2-rF|%abZh?45H@i9^d3w zB3sC@LjD09B63Rfi4d`ogIq*EglXXT+k~~DtRRn?k@~_B<-$O} zfB#)i>9DuMPhhjc;3@pN>^>KWYkM9?pYnflC@u$R#<}jS8*OngwHHo{KVwy`Q(moF zQ&rdyd!6~X(0Sxe{d|HlY@oLJWB=V8KS=i14Z{~Q2az^+vN%ii%X`q-TQvv02R^at zKC+~8T<~0iF)@G@+X3G!eK8GLy zIP0^Zb$y8th8|^iVS6Ffvwy;Z5I>B@(ZQCdBm_LSV!%NKProCda>z2|@3c8E046O* z*~6Y)PwtY=VOl9 zeGNvKjV8dr2Bq3h=|;;8b@J)_PVM8|eAlsjR7Z|p{2C^l6gO4+Aj<54iysM zo*b45ZlaD@Rj(olu@Ie5ca#!eIPPd&Wlsbq_fmjY)9f+!H8=k`jQ&i3@BphQZYlCwa@cojJWz0I)T zj7ZADS%w~K<}PUx&$yP72CUu7TMMp@&feqC=WHPirz1BT@QiPEI6(4tA0g{^sA8KP z`QDX*IL}ImC|0p}0q~zr?A1QOOP%nka2hp}?dYMs0a~=9C zkcb(v;e)6CP6r!ZZ}3!y1$=l$!|it$vYYAS?I16a@xMGv+4Q7;{0?e$e`T}2yeV~TcassFphMk@-_h3Jkm=zUTD4BIB3)tXq~uXft_A+Ws)d_OdWqwOh6 zkJ~fy0>egpr2o$=RRg%Ou7uz3Eg*of&5Eto4Wf zD_>wI+rx?8j7+4jpuxzB3b?h53(co{xAou7=(}CwGNZ zJE9Xxs#L4&0#qqas8?PtLQ#(XvA#h~lsoxg+gLPXdXc{3PjrzRgUVY49TsV2C0S)D zlr}-t=dt=rXqMzwt0ox2wZshi92x>M4vK;tAtyMtt5C z4|OswpESbIT#)8n&00~PnhcC_hfiXz7jey{7f0D`&_K90#5cLQHq(D^tHP0wtKL?V zEF-D}NHHx{-Yd61wX~H!fTwIcDb#zeI~bX4w+u z7-f?xZ)ZDz%+X@{+c+Ix9o1qt!3k%O$cv{(fbEyGp?_rE0ixLCh_xdTvprnZJ1<*7 zyhnrgUExa8Q$nyk-Rn&d7wvtEq}_x6p4p z5_SFKm)RMcotJ_QL^Ef;B7@Q$lYy7UZKOJlJ%goIl^^*1hW;q9I)}dXvCi{f8B=vd zw`%;P(0gfcC-quTAbE=CRsVhi`QtpZ*K}C5k9>0Zi2IGNOJy>Cu33&svf!ExZHl$O zUUKS+{`q)q=bv08rC_ua^mS|9z4vIci(~ExLG7`Z{8AQbV!I$l-h5nN7*(?_jT&L@B6IP^Ijp33;D3VVbv?gXvy4}rC=5Ffs0RJx!&{%;~ zvo$|RiFUk)6hAwDzS#lNbT;+KO?`H_ngwuu{rlxUWM-=j{t?ZihaW`gaDlw9T3uaL>j^ zKSkaZUta!qi}4Wol;-{cXV!`l8amdI<#6PjIH6VS6m`r9Y(YEaFQqS)NZ;LkZOZd? 
zoV8s^Cl-aK4}%AQtCJdvIk@kXb|+W&_<|+zz3#s%Iiv_m%2CpfF4bY`RVzTslHOud z`_%4ia4fIffVjK=n(fM&3>EqMH+$z1Qmd^YS0D=PSfYdsXf0W!w?=6|?iq+yp6vwe zms)gxCyJtCCwT2--NWDy_0N3t=@3366x^F%OZc;Lr-v>d0OR4I%5?x*)UB?VL+TKq z(9gh5HQbc!8<6Yd@Co>_Q>C{5OOajl6pPp_>&#Wu=3X!|e+ii%_`VJbNPT$>Vqq@q}mZvM413$M9O=KVCUQA>6cKKU zy3V?f*;XKPS%PU)AG9c94t@YLA(&}yNKXp8Y<}B0`H@gIVX`4jd+e|1zFIRT~UEQ2p0e`0KH zr6dK=1_i%&q0y|EY4IV@7X{UcbL@+?{@#fAuIMqwWyc>fSKRXnoxLBNVNHYPUs!5r zwV}CfO^Y3<1c{aK-z_P=(gWk*;B76WbSM5O?_g)m;<{-Spo)A%t{5iz%Wp`&?;V** z$~~7k9a`kc_JX?*qT|nLj0dh3;`r&7LHZ0k%_VRkDQF(lv3UQWccu@O;alq*()o1R! zLr>$s-l=tNlGIj#36dNQw@Ib;(rRj~=;lM;q(?aer)Me#_p8t^9er(+qfy!dY%Hc1 zSLGL^UH$&U;{i+YPUzrVyZ%gxKm)YCFoENw;rWb!*lcp7mYl5X@r6}2DjW?p@2Qeg zfsu&tI#iti;TXCMzBVUxc=mGTym%5#nT56;D=E&+z(~NBpBXmu@vBHrgJ_MXpCRlh zgKDKlJbS1+bh*c5oRZpW#EPDn{d*W+h%JOg`H6Bw-3uxdqh#`Tl2tNDwXtTEEKy3Ds=?w5kv z)BPrjCEjYQvQ%I?bUAqSy7jsn=*NmT+``@quV<`Xx4#gZzp6T9}Zeam&%?ju{gkq3}NI4 zwQ0PiEB`>Wayr@)WHR#t|36W3OG3w6?5BTABy>Rx<=hsSQW3z0HBvCCw% zKdr5O5<6+45&%B-VpJI*Y#@p8G8#yJuhi@$cwngQLB_Om4O`5fDpr-x~#)$urAiJRqrR&`q=S6Lg;0dMK z8f@Eqty@<$ZjQF&2?^M^os*F%o}v3=b4i^Q7f9l+I)KCBbF^>bP1CfRh9rgdQ8z#_3t6IjD|tCI<+KVy0PFB8ioH)I&@Z@Kyb<^DPvLx2#O9#W zSWpmmMwwsWbr8qs04Y+z(73q*Nbs&3?Da8Y$Rk_zeuX;!X`JBLqqgfysQxgn07TXU z{z6LX3Cf!}s55QsS@-{&eZt=~9w4ap*`;x?mV24Z)X6b0VJ~^mEMG9~XHZ-W%LJXX zDv(p>HtCIZYKZDC_4bt{l?(B1cpaemXu}ZOBdJe>}UCY$eE1Ur5D@W43JHRrTRI1uh04Aa_A+W_37P}mY)8ib# zI2LlNM#hXme86(hC<{^zLkYn7Vv<2(io%I2#u?v@>Qdp{WLr^e0|zGktOb#DPNsZ9_!`Nr0+xfZAKdHu1!EPzBO=Zluje#?{#hqTvri_44MFp?%WQrdM|h z>|bOo=F0$oxb=X{8-gBUx6K{Hl!-vyD&B2Q;onfFQsj@wrD8X0rQ~De-FZ^m7{SGM8 z%y@+XphlK zsGL3N|BJXlh|wRJKHqZa2uT{&I!2V$z6z8OC6B$omV4an8y~HEOI9yucYz$2lo@&JTU@dqf6CL2GXuFC-{%mi;{kQLsfHd+u^}Gwo(pR zYWU_=VerX@idc`@1amDR6Ibu);auD{d0FA8R$AKxrU;=yU1apmK_j)!&h@S??F;1u zt`+@8(-lnXR!i#-WS7fA-jm|w&_}2@_1o-3Z@e2QAbLTik;_c%U2iKupIXMck(K(* zkl+u8^n!})U9BqO&S9?G%&PVTu=AlNYzR#^o(0ov4PolZZ|Zi>l>02EUnrZCsVmG_ z)#v@8!OPiHwL-boQt7Fqp57WabqDlnM<1^neYL%cVd5sDUju(!vjUe%?OM#xPG88e zx3mpoZ3~rlz5iQltMntqrFE90CL30d4Ddb&dPF%WLJ7*<%|J@)I0i4U&&5;-H{^) zO7j|0uU?R!fjPi}c)M7Su$uGQZar;A+6a$EM7}< zVlhCl3}p^L`9rq-HXZS#XnL&NinQ)SR6D>2Dv5l2%avktrtU`18>1N~!a?$LG1ahV zJ?U2DF_?6*LYT^a^*62Y`7w8`XQr`_=8Kce3q0~KTzNLVw=;X?=-v!5qb9H3p#f%| z6rmnJUH+F@^K5s`&h>d)2tBRC*$MTMD-Da>BS|;FFHib_U7Qn5P z?ja+@tA6QioZC(1*YlkMW24%Pt{Y+d#rAy&E@cd!?|-}>@dI!(seKS3-RrzVQ#rbW zCtHta7j+rmz(9~T0hxgL|=9qh9H)XzlJ=#S8EqlecaxtP%7{UH)Mz+-$_vh}#u zmf=gE5;E9n(-iNc*(*I+QwI^>fBqxy$Oo<|Xi%!D1H)ZEfqAUi=i0M2&2HaCQr|LU zW;Mu{j6k@W&phAQs@0YJ?)zSbZEtz@tix*7m0qC%1N&d+f47|Bc`^@Cqx(GqfW&^f zJcGaf7NH55bi20^ekmfPU0>j}^TdsX_bPI?ZuOqLQgt#1{@ z+0K5|awUa_JpJ>EYGoWwm&ar5agShCWB3Pa zj#J)a$S@x}g`bmQ1O>PK263BZxA`<|OWvMx3)_OE2Q$)|qS-22n>seJnYg<=dcHU~ zlxm;#i6F}?3@XR_iMA4(HMoS2kR3~qPG=p@S+R8I4rKd4h^}NcXHDIS~28% z=Q}ONz(WwujNnDJuNm=AsaQr&L?4hrjD-nb&Nlc>wGC*;9g)4{UmhPa_R_g66J z`08iM{`ae-^b5PQKN+faZV)AVWn*t;Tz)q(%DiYNG+1k71+&5!hYQL#hg!5iz(=&V zS`Pj5(3M^bxfODfGMwEq#M~w;pF3*5N)=>u$Wo|K9c*h ziKmx;QN*|eWzEu|X3PK$>@#`}HHx(~Po8+KXQF)Y5BCj4u7+^S65jQR7lEc- z_iL5L9^rtp(^AEpnr8ioZN=>|`+S?wR?)xb;@FJO#qnTZKzt9K4B(&FKem|R_AkbI zEZ1SZwKi~~1&Nkuhl5Vjr9-$Rq~%nzhR&J;rKF%SZl`I%vKRt+GWBW^vfU3mk7#6W z69(T06{so>eKC#W*}sCKoChubfAt?>_sX8OSY~$f-UF)-tuBS1x51Ea;*`eO^`jbi zJ%BTUe(Zi}T>ESTu)u{p!?=XYp68y^Qf<}aY(n1tF6@>5jxoA=Rm6@j#)$vj^5fTX z(&UOZ=8}y1_G%w*VJQ8?*Hmdvjz7?nBi9O=ZaBXU>p$xJo{|Nda6-I>2vHWv=r!U< zYcr>*54$=ABF$v10Wvl@%;RyDQ@r8|$r}XJRvNr|p&Z%y)RUI%Uc+DjU-vM%D@zae z`=MmzJJ>|U_|Xg}UrU^qaT@&yh*zNY40>Gf(VIQLF`mdWKwra=j&wzhHMO ztk@WDvsG?UZZ%!yFBolSH6s2dRpXs;6gToRqAr^2=3OC3E0tpw;Zyzkt_;+jhQp2KaWt367PZeIl6$--?KNa 
zubX!>LEaJcNbhMong(0`>PucF-*Js2G_cgL`#eM8JI!mO;PYjDs`cCV&v~ralcm^i z6g_M&Y@!(MULW~RmcXBVu~wtbjW zpq=;ZUvnAZO1?mTX6}*AqGsy<7|NIzwvPu9&sLU};l@P{S+25d zN|;*3jU~&>Mbkqd5PS_fjQfQ99v6mB9X}?^79ksMAcx>ai^ttJla~HAVmzzMj#ybI z0;f!hVxN^HL8RZjvODe`95aDyoJZfiwUZA&u)>@fNj%^j zeLsNX2RBHc)`*8g;euI7BDI^FDPQ0jg~CT&kjLMTvX;wjeLE0P>;6ObK8(?lMVa58 z35D;u+I<_#P9*qfPh8tOo{}ObD>-D%TGXBF^Q)@K0abK|V}(j!j`X%l`N2w3<)I#a zQ?zHcG~vKbmiogY?VIlT!P@b6z%<_Dxp-d|S{+|r@^uYdXU zRJPo%b3@lY=HD$eoT>{P0~K${h%5%*W#2xzGvDjzBHe&a$*p0vPur8tP@Ut&))9Mct$CvB>1tEj`B?J#*;BYX|a=A5>j6X)_vi*6sgpSp>AU-!7Bytf8zLNxUsGx%<5>Ppzy#Qt; zx`*UcZ%mi&Xh9WZd5Gvo5carF779by zW2U*|Bxc%_;06+-?_J^|jOK-1(Q1rO{zeE4D3&J~yHV(W7MS`q7-d12Aub_e;atp( z=Ae~qfFmbcrj>P=;*LDQn1+5G>7Pi5Awm-i6+t z(vOYk(efLUCCPrvJ@P9MK4tjMW?qlh0Z1!*-h0Q&mvNZ|MX#v%X|P;W&RMdyD^}Jy z1-tMmnlgW~ZT4L{I275mK?=G*BEPo3rOhHS-@+rp0AKa0sPj&5cD3{x(mBMxR-QS) zX_IH(n_W4LZH#3Fh7a>1lJ7z4d?K^Y6c%(W=bnY;9Eqo=!eU7_v00lB6!X*I9%6iV zW`yl&0mYUx|FJcC1{$9y z!9eHYR?uM*&h%SKQTS>*6tx9QGF#RID2HSfKjpF?DVtsEA2eUjDU*U4>~~3Y#>smW zH)ODO79!1Povg(SYw5OoS<_E5Gwb+-xm$lX%zvQXzRJA?zU`YFAH2>#cy3~1Bi{5l z_QZvfs^(SPnPnY~{eR5sg`csqK#_S5I;DBXb3-$xZ}RGtxnIda36Nc_Q9Ft8?zKIC zk)om#W1~z;DVwFTg%*m!i#XDW7aa-_J% ziB9|k%`<@=a0FOfVmNXiAxeT&l<2nA1pN9!qW-hdk*LgWy2X4e9T#PfKoh4|oI6N|TJjYID+Z;Y{mSobBo?Q2%3 zi0MOeI5OG`!vgvbs$UT)?kT+X-z|X{@TAIb z1+H~b>hq(Q)5*q$hq{vMihg`|UeuGxxQ37x$hozHdmN~5C)Z6>g2n- z=;2)ePI!;YT3r|`0$oe_6ZQgJ`V9K@d7ee<%gia5(2cTvT;ZyGuzsf_Z zY(d{y?{f{JB)*(%nA8;n?gn)Wb)+whSOt56>vO-M^MPgX@0KA=N-gC@7y5M1C-DgA ze-maX9U3z86ko9e+H_Sy0#D@%>0Yg8dw4!`Ld5JNgv-oxO07jnvA*40y$JK1p zo?P5Ssm@sfI8K+mr%WG3M5}E#0{>V2A&=*{PgRwn?y&C|mrCm=$zP)Yi6+36h0(!; zH79y#tEtTFCe&wA)rx#s!h?p>(BWiBUmq&~Cew$c=*a^@-y!?D(D`LJT&EZoUnc`5 zl}3(Hmjw;|mKrjnWI@Fwg!BEBE*3OrJ64V>#|tD0>s4r(j1arYlIFl!S6CA-VHITh z5>EazH||Pzw-XBeTVDkAlvg&gEZy##YUWQWHX3vZKj3~>dP|5a}T z@6=;XD>)J6G?G2f0+`&<$Mx__%&miE0&>>4TW@*cCD3r}2Pst5rHL7-cFB1sXfz^)F z&A@c4;#ACtma>>D_UHlgzDt7=J(ax#6KSb-*Y82XfZNHWZ~41h(G{-?>5tAIN0Af= z6L;JbfM~0s(;(Gajd-27j7CmUSIHx|l?}R$`rNxSZGS4E!)*hLo+OnRUpUpGGq7n0 z4)^WS~s$ULDDv=GH9>%9AHp3e7{3I3eO8titxh z;-F+@= z2_-@qx+SI$KIYv~8yBI41i1%yaMeUXjmT;$yD4?sq+jNwmlN9=*g$+|mg?>u1dFAyx>Ei!viAub|raJ!)O?X0I~Lr@1DMQf6S#hPRw5 z-stxFld___X1itbbiaX%n#R50p47=y9V<4C;D%4YyscBMhftg8*^;&B0I!&MqQIR8 zBH_^!RQzLB6~>~-C&c7(+1oCXW31R>vz}!Z?~!6{C32a7JTY49c}(kyD(u=*l=lyb zX2TNqgY_R=m2jAYsD<4m?wQGnFq<@~_4#|1wQzUbcwlcM_Inm9sv;k8Du5ZnvI~u+=;4J7}353vFk>xZ4R?` zAfI)UMGNr-F*jw2Ud382Exd67$S)9N%3Oq5Lzh)-#uqs5b*#lW*UPfxFVLr^+6L8C zCBcI>7+D)MDn@WM%j}F#tDjcWVI7Z0n~YPT%6OtaI9?eWr)OxtS0=WgC(FCL(G|nW z;@}Mrsf(&B!J)b5I~lBLTm!`X)LKU#KP4|t0?ITcgXmdi_|@IKa&UUa-i%9dZ(%ne)uRXiflRJ zq*^q)r^#*b$cHTEAJms4c1;#3qixwTJJJ0f{>(f5KW$HTwzMGM?QdTI^_&gV3SdS3yAW&q#G8|2uZ%NU*4C*)+H!|j@n9>ND)#VH0mKwi1@wg$Od$bb&Tvj7m zlsLM{0F`562pLkujoP{D9d}bwl>r5yTfO6bVUDP=^w8T7uxxokxU(RT!e*k}} z&Y~0>rlhiDQ2BXmKSE>0+BEAPW;3Gj8SfS{I%Uv>(FIgRU$Yhah^n6mlf1uTViEhW z+ZjW7I$p7iUnnaAm2?cj;KjTt()N72`YN(#zt&mF2CfE1-Mc8*1{X@7R9Ca_I{mxF zvKK6}#CCWn?DC6=Mank^UjT*kgxN-o{72Q;J>n1jkCrn@Lk3c|YR42YU z*v;uH^tH8`)b5J(GUJaZ>zll9=MGLyC&x-Nk<%49i9XhaSXPF=@Z6kz;q*$!WXe-L zf`mMDa$`)4sqXMKTDH(n@1oJ*NE>6Dv<<|Ho4~Bh*l0{(D`^+cf8Nv{SZJL%cO;<^ zb|yXpB5?n)h~Jt!LemWgwl)v*AcM=tL&*;#V6y~NS-p3oWavVa!ko-@F9jQ zHx8tR9F9&)#`){NqPRyJ{VH(NyUR6nrxoqAylR}`oLzpyoto$rF%(W3($hWIvCH$d z%;)e4+fYr$_2?**?dgfdBu{RRaZoJeBMBZkhpY3bu}4vD-_3f$$w^bC%50>|F^Z55 z$jmfy+}eU0>HNjb+&K7`Pu=wgS6d*vp9bHSO!ox@M&oMPDECJD%ET+G>ECcvgo&yj zS3O7t?{Lalj^gZ4K^DgfXBc%%Dd-Y=B+DA7dYV@D8#<6=dEgKaf@FWg-Emt#u_H-& zlm={Qnh0_7leq6$d)F2M3Kz!_OFW>NGbxwctKyW$9?gl2(i>y5?9*I 
zr4+;@UU4a7L21Q-z~j@uZSZts(~t8s+LhTNb^SQYZNBO^WF+tW6}iVhF0%fk#K94n zB-Gm4^lS+r3$RGzx271F4QyUqw1#=sWi1?XHe9)3u?lX$<6(Z=-XnRLBal2nr;b4z#xP2IDQyRZI>bc08p~3=p-_zW$ZpCI?xbSVccTs2AO7@${A3Jsh!TuB%!C@Vm`lIQ;}<-18P|e zfr3>z(dpP}t+^xL7fq&L5td_@T)oKh1yvzIFR<=GyQ@9M_{*1LU~4}Q&l_ho%L!_T zX*z}S!~=Jv`|>eiEK;$R;K~SQ5tLviGBk=*DWz4d_PzLAQ+^-JM004(OCsfgawvr- zarNZavBr}lEED*8J3!K~LHe2k6TOxM?gesBVR+;cC0JIL44A#fK!nyXr$=0Y$X{%>HA((!Ztdq$k ztkLyX%8EDYup-IW9qZzt1{jYlkC`t7l9>QvLu-vf1*_QX1)Ya>7-ub%4;+&e)2~Xe zkXhJ|i1#DC9kA-;7b_l|#HE21q>)>-`)|khz7s6b^iPL7>__qsm&;1>%o{P&CPxrWZQ+T0M8*SYTQJ*bWEgNEhl&eKM(sJH}?@F@p-ByM-x zwga1yvi=N{TMunJSEPi&4cUG!2RpEH+{qLm!SCb<=rlq%|{(m^~L zHOaDlut-^8Z9!HBg`PH8V`&r;KnMuhjrOlxGn0_LW&E%dj8vhCr5mv)yIO_+0PHad zAYma+i^ix|S3$MY4efUu8{luxzBi}35mc;Y6ih?BkXjuubMOVg4p!;%~ziM1;7R07;lHfxdTwhZiLe6Y)-Wyz0J zO4(+zHLxU806rg{0?|WgeQa1*H_Bim#$vmA&Ry|CrUQpM)Sn};xw*z{^=P#9P z*G^pI%CZGx7MTERPAEFH(&5i*>tL!|QpGRZ$yMAe`I2_pI0wDNiFi%93` z#}llPIp0OeTEA{d9Bgp5Z#*N`sM5;r3$w^H-jSphZ;MSIUHGAfGWoIjXxAtp%sg_h zQb`LN+#8}PK>q+J^}ky*y^^gS&c?Z1&Tn-@fUG4@l{4(-{Nij-ngyJsZ}V?)7aC4<-Xh9leiw6 zae(7)#;Iz8E2+$Zqf!E)STH82YUj{hcfW1%XO^8(NY&y5x~T-P*y|*kAdg&C#`Ae( z%4PEfPz`zzHm!n(8{X!uRr}%fisw+#zM@rhD+Na(H{kkhNAJEbi+Tn~jM>+fr;**z zjR94tABb!hjWxF&y^bNB$v=qaV9Mp8moTX#3F9nSFdFERpq-5$uN8*+JuJkIBo48! zn&Q+&dA1_#_x2x4s?T_~XD#9hneeG)f<>@oO(dN~iQjGR#@&a{48ggLsz=ZKEtB)P zRt}3I=OMcst&jBYfSnc_;K!%8}7+fdkwqkLJ$9I?8h z(-hJc47B9AP;|PjQwYlfe(hgzfZk^$U`Ui4%yQB)ki?DoQm!Q#*YfaXqi~2wB~1z@xxX$M2|2Bzi>O&MS?MN{K_uuvZ7X4b;jjAoeQ|A> zg=JVZDhG|(WEEAv@81=E4+)3EWSz>wv*tu#mO6ZajQ{}ddE*wDMNHmjFD77x$inC! zt0CUntgpmDJCZ#II7E`L8g>cNIO2DW^`K$qgtDp*6}J730~``3hm5{X7m;kDl!tNxirVaV7iWw*%c&1m zCU-^h{+iI*>rnIu0ci(9b!q{$-Mat?tK+qQeX$#|<0dVg%cM+$GO()Y59NbiqD@w# zw`#6=!Lrhl=fY~-Km>+HIVT`_| zhG|}0zv;wP2#=SrK`d^7BL0{v(R_(U3mRo?>$(uS^dKZ1yxJ_94YwTc=>eLTGnbiX zcrv+`a$#kPMJUBeZuCFRcl56K;sueT8En+Kr30rVOS2?>$RJ%?Y<`4qg273zPGDu3 zQI<0HzDm-?lmVpBEoXB?R``h&-p6veC(TLI%Ss?fvgRe0Eh{Xb?8m>906zFrubD`4 z%L1N^askpuZ7Jjluw)}q9l+pLCmeZf!-15JI9*1890Sf&4mBtYLoonZt+)1KctuZ_ zi!Uso69GdLxDr(|`Chiz-HtgJk?a=e6eBkjEHimj=qd%*HNYa!=S_oWxU;e1vB=#+ zD>Sl|%du#qM0agev;iY^9k<-p;|^r;r5z7sXk@ zVNAnH?F=%IOV{O`vl?Nsmi(jxxr3>Erb$W# zHL=p8Oc$QjRpNzkUyfWn@MM+^nTcvZT9sI$!#XpG5&6`-=WdV?7 z^)lTt;kdFa4@;x(jep}*%VqO^ADW5YTS;=ogpDHxF^w#?HU)t?cN?DA#b#VdG+Cn) zstI9{H8m>H0E+{Q1mCg7oNDSvOBDG<2~N!<0%j!t0OAn)(`o^QlYZKO??Vf=q{?Ur z=oV2E4I~rsH4}gOnjV7x`ec1_6)|B z%^pA>OlXhH>751gE=c5=xWr2gWIOK1V4*^;$`7{Z=0L>q%&Vn$E{UmDfaBcpc&$wt zNY-q_A!P?rgYgO@Vc*P0%DwRjGc7M$sH&tD13(~AJMDa8{uZv) zDl>(le46`?_+3~*oR&PqPpIXcRY@Qlj1@Ky6fe?+94$MzR&~*}y;X|`kM_Xwxyb_) zBc-U2Y`3H`k-;8>{{T(#su=jAE=lriv1W_`qlQvWp6m$&+V=Z=@aA7XE>WXs8bm5r zr?+zF{dpfORI@w@r^s}JVNRe*{{a60LkzC9vXTW6Ivrk%s2ln>?}WT*>4D9yqx4tc zFZzVVioE_r)B|ux3>8QmRT`u1kFI!E;sno9bpjm&Rg<{@_TzE~(;tj7dD$`@XUq8j zm2JS*KsQPb*JVdNxW@Oxe;(%YKs1g~&>;_XVZFHQ!|9JF=YI!nxnnwbyr|1k?EN7( z;w9-4x=1I#08sDWx%py;H{+}2*CMoQb_ulvU2K2(fBPdJCZF+)<=6@?7^4773m-4% zgy;NCnVHzEjKu(J_(8K)2ixV2WIu<^c0J@r(Q}?T8%5=k?M><&@;~3{Sjc#Xj@uH= zuB~hezW4q6<2^r#&oqiP+7smfU9Ou4e#ZOy;VjN!3`(qHetK4n|lEgtoI>XaIvleYhANXFUrn zMOlAU%CKr1?Ld*h+}E(hM7|*TlpQw-ZKvVvb|-I3ylIXZOVF8QNOLpdBu65|Y$nXe zZud4mUTXPv=M{72L#(n^#b#|gFA7Cb+U%*@vH4(kBf~PV3FcP}s!rud=eH-E9AzEy z(*g%jf*DeUAVqWZI$PXnH&>0Z#~2xA(KPNr&@wo08ZaZ-`pH45LlHQ z#*L+o>NbYJ^3dM-@7!^V{Ki^114b+pp-#a|2SD5J=xCfFn1@R?kSGvi(kkt!6YKN- zm~3XAfN7$nu#A=+Mu`a|UX!GoD?`?1A8*4h+` zNi3cBsy(Z3+W;_u)F-8WQY>y7?`!#2*BWBbw|#)jL)6yMyt1K9K$!Ott=oGptwZ-U zhVq$~L6(y-YY}Za*OR2KtN8}gTkJr@XGrIcGa{y;BWl*R7?wU`NB;nB!nVJh1Pc1~ zKU*;(mSR|)C%85_plth!!&6OcTeQv$q(dO{J8E*O6&N2n62OpsRH#x99$#ER(S$4j 
zX~Z#-;K%+L)ndJgaee&2VThYZjS(59n^IRyHn^Q^p7g@GcElF>QBh z3W7MXq#O3%1x>MS(gcJ}B(WE>A{IIuYZk(YJHWtOb4w60a=ZD74nae{#f=r5_owTzvSryvv z?}t`}Ud}vy7DYQOv5hQa_?E~g(2LbyOj^MI0GRyM=`W}B!J$Q1PylzVRH}6v4w{af zlCX$TwvSfEtM=z?TS8f2%}pJ!MQIa44V7O|`3mP5@+(2cR9<)lvF3b7I7&cbGKC-j zND4oxCcSUl8yuU(3PlD_r$7+Sx_r6b8uG#)|^Y4t|)*;NH4KgZ64Nd7&I3o-~qL zW+cc?mEcgIEbqqK9nE2>+As?kS~OMG#I^IkILvAW5K}J_Ms!?}&(s8_!~wXik0jXM zh8mJ%lth*}WXwzm5%o<9)8#r7in7DkzSW8FP`8!Yb01DgM@3kpHn1GEwVUF%+X?3~ zqg2v{M?l*%8+AH>*d4bbh5CErts6%xK^ifZ6+@V3W2BuedyTdyfO}zCgwe{`G(;}z zaV3!3ZW+BkN7sB3hZks5n`EGk6(ngSEYwF=lVXnLe6|5&$7|<&ODzHkqmmfdlqN%= zwJjjnR^DS^dhd8n2~tRZ@%nNsatD#<0$7V8k3NH&A3Q&ln257WBcvLFh946z>78E< zP#f94O>fRFT6Ts;2pJ6;wqhuqVtIcCX|+;HflC`UEubf*GM=0lK1h z-v0p86w&yK%aam8t1JvI-gz{uk-^kQl-qXWV^@oLGsZY#h)EQWBD97=3F<9^TC1H* z2lU0OiE@V`aK`bn#|~VfN!#UuOP5IVQ>e=2+BHRK5w2^b2Vi>WUURa&Hh3TLw}Mg|n#F&hRN+z<YAw{&SuaK^wDGI0q%o>2+@7!Z##-WJ>YPobe zK`79rcWngK`fvXL?PABK^9;cbv81svV#ko)YWE}ocup>oXd1O1sC8g;^GjNN%U5m& zA=X)9kS^0j5)E2G-iojzk>yeKKKR2pwu-%%LqwSvwX3?xLCN@(sVl$DSRb|llf2>$?wb_>57Z^syJT$JlJW>rU45r>oES|BQl=D9WN zg?ufd)0!eM+FUZ&0@Ya9Ke75_PYeOPE2|Ne)+o;N@K%zvg4Evp*epx={)vu-mPoV;sj-{Q3v z=7!?QvAzU)p&eH%m(0j_GpLXqjZzz-W`U}{`(3@^(M2j|1fkwy=%e)XM-Ut8>QFWS zs=jDouak}6&zg=Vb}Ft~YHLdb0M)ggI3VIeE&CNIsOYI^b43+r8fXSK$O{?-*doT~ z(2sLi9z`1Y{jfMPKd$Vp zJ84tIV|yZpp&j@1#)=exW>NY;Hk&IJsv5SiYkk!3Tm3L$mW@bP%4YK#c+|^bc-v9t z>*GqEH%i<6GY4OJ8ii3-p2NKy2|Ir zN+`1K85X$!uuwiC0rPNnp;(4r#qycI&ZbfbvxW%N<_d<1RZSYIqrmp$7F93Tyn7O4;-tn_|)?%S7(;_1pJU2sTd_wf3x3z9LqN zQj2uV^45wb1UI8LX26m#h8Fu6u2}61MBz2w6hHC z!c!Z_4v+v7H%$CnQUT}>r`H~d;)%f_;j(@`^#eoY+n=Tq-O~R6V77o5&;p?46yB{b zBBqVEQG3_77$Y3I>WyW}qoW0MzM(sl-H!*q^1OCj(@N3wL$;S?K%$tbHr{X;dX5M6 z$6GffIbjiq<;pFC*OAmM{{WR1!QZ~$JPV30N0ZomT zx{;^`*jVTJZH_4t#|s8F$jk{*P7Sc!8@sXg#Pcm?Hq=WpvA*El(Y=l-r*wuUs>S3| z=0+*0W3t*g1lZrxa4dV_7l_3nW&KFx1YOzO0&Dtr+qNu#3mP$IvcQ6@1z9$IuYaZ| zBq)y}DH0%iB#Y9;XpN6!`_>wzLYlD{T+U!=Tmf~ltsS*c91kt}frew0l@X*&42Ttg zuUiA@LwrIt=*t?$&Zk1yHCwLxow>s%B$7resJMG#4GRSEez+FZirNX6%wAa?w4rHU z%Krcfr6X^B>(Y)0XF&r(ps;wfX|gnF>IFYy8`BCbdfCH-} ziF9v(4);LczqS-C?-F#;PfmWGpyhRju)b@rFYzC7=y2@WVDds7uu*%F!8NFlVsRDA z6AE8HHKO}&7k(eo>UhSm=i2(ww)2^1C2CJ-upLg z_uSwIB=cq&h-zbOLW&nddLHME_{N{|4{s?hyF8Jti$!O?`yNR8d*IKIGD?9Ji?Rd* zNat(b{eb@fOaso7(R^&kNs#F?20%!ZAX=lz6~Af}ckhb4=3KoX3V=H)B$+Ih+qN?qn8}k$5gdX|LrUyTgFtby{#et8CoO|0dMT}4H8O&$ zGJ>eUGwg1+_TTC2h?yjSEUl>sjI3-Mw%~mQSFYG*7ZGNok&7fMB>!j5kXz~K|E zmkn^L^vj(j>Q+Ol&sulrlU;hAT8+xByTY9h( zcSIa4IgcS18zO7wE)rd^4HfD>dW^70 zMsygoP6RNi*m-n=C3YNG-`5M5$^LFveCo!gaFH1zh!YSLPnZB3ZCh+C^LS?_tl62? 
zLZQJf#3HL}m_rR0(Uy|~}LF!D&8skAJzyzV82IONbm z{&F}}AvK_ATRleK;vfsYaL~;H8Inv6SQ0ndpB%1a6RvBuvg*RuX&u1mpjviD2Ly5~W zoAm;ZAYS{8tYLYw-F)obe8Farm6m4R*;r8mfj`%c&0+rl&LrBNL)UdsS~faWZNL-& zv%g|bd^?}Srjyz+p~j3L9Z=$^cZ-&k|c>EKDmOmR-?6m6c2hk z_rSE~C5^K2fo*RgVn`rWYOZ^1KAd5k@35f0fKdQsi!%h1WuaoqOYT{$YilBxVgMe3 z`NIIo1aiElb@7bKp+$}+C6qUq*tJzqMFM!TdBXWg-bGncI+1jhqyl1+2PT4w*xu%b zl4|I4fglN(h39SnvE8r>IVan z>4Y5hMNBm+BLd7$o3bje*biFz;F52$(?sRWM=oL}S=w;s)#RI{OxLK`8`08t6+;Qk znOS34;L8V?TN$L0)R1@WUV^soiv`Rq34kBGDWn^RUznfD~1%6lk?S_0g3^}$cP>liTwyp))AFv!_t(92f z%aZKOZPgI@MS@uGU4_}-Yzwa*P0PerLyaYd>;9zCJ-gzr)gUCDXtstHA&hb8V*X~3 z*`v0@&~xd642SB5XURD%QyT$nGCd0&!<1Wf~xw8+ilJI@!RQwre*U; z$1f1ltVD-So7AKXwkGf(o}b2xs0G8KvAZ)-tYFoWR#?lnTBF16Gn!QN`ghvBDdqU zU`^r~opxkoL~@})+Nd4x-)r~9g63eBKAS$LV5Rk_0{d<0xS`zE7_ZnHzKVSAYcCY> zZ8-v@XETGU*zgoq=Z}%a2aEW><-9Ikg_v~>ET=-2h}G>z z{+N_6Bg-n%(TuwQO4inE{{Yd$3S<1i(Bg!Ic})^E>?_m#F$kq)m_nKqx>CvfLs8g+ z*ze9aw{C{Clt$$SD1>emXGUgt?D;&vf$y>paDKRnU(3FDjo8ABX=rXhv-SRXCb?Q= zcM(F;y%r6ya;A@Z2KT-9yiCB!1jX1Y$q<{@?giz>-9a?Z`7 zScATgG23SOpC~_EAu=*_)W@O5^rUUi)BQ1c8Z}9JY8)ZdS*VI_$i6uv?r;qDLMO{C zu$dY9V3^{Ix!>4n+-`4haHRGFMRrzRt4>1%LMYIqM!o&#mw%c8R+lE?at2J8i1 z-iKjl(37BPHDLSp!x1ns%_NhlU3zzGef3$}kSpi(#m+6eBy>!q zkDYNV(Ug{U*Q7AfeC^l)ez=xWY{XJV@jFMDk|iorVzO$nME4*9ef{vpONfswmAX>2-8^4B)j>8wRWEf?#u`}hL zTCh=?Y6Hp$Ayc~1)Q63!K~LRQhJmr|8{XcP|p`QMyb<&5o^S@N<= z2!SX>aB_*QNx&}Q?w zc+ene!dq(_+n-dM>gu~BTdr0slG+tuRS!Q)S}k>&E~6iWH1!v6sI zM6!8++!_r;@HQLO;H;TEtYT^(;V9fm-;MH1>LfP`0xvoVw& z;Ve+Rdh#LA<@7)muSmS;4ureY)0JRzgMwz;?BXG$toc{p1NFDO1tUwXbl~TDjPdn~5 z1NXz3?O91Af_Wh%5$0(E4UKJ~XmPgW?S*crY@(E`vHt+LBnvxBpOPXHm+>m6k$*Ufi2Ns>COHETklOjtza>%nu=e7xYNTXl?F1Y6hSec|)a_g6q z^+9O$GB6<3a7_vX-u&JNK58*8kNcc&qFvD&SaHCu7iW*RwkJF05sJ2U28lGiPnTjr zx~k=kUied!L%7`t%K9H%=?01|TGfdJoy#!(TjMd~Q04qWNo0VD79~ZIVM8F3OCCzC z4(9%2(;A^9v974|Fh!N31DI6~H62^|* zuyL2Lz3`129EM^n7SF)Y?x)e^pe zP5%H~KIwit;z3!*Mp^ScMU{oBMC{56iv+jv4hPp2WVA44^Gd22Bn+c-t3V~YaaDE2 zjCB*Vb>O|Ryskvg3@+{w0RRH~mA$}T2pWrff^eDXNTHahjdKc`LA_hCEQ8aHyI8~d z%;dsZvamX)UokUAvXT|Zpv%X?3Xxv7?S++IY{p6`!rbveS{G>BQC}~)(YfC2;1Zv5 zG&w@*Ae%07*)4Lm@~}Gt&a2uApO89C~1&{{Z_6$ycF#sA(ce0|vqhOG;8fWoEVB$-Vj5VsU{gHeW2QB*lWq8lWVxU;z1>KW}_@GYo=E)K4Ck z1Tiku!A)&n{EyEO%H5fcl~Ey5Pfm=Mce>ER{+2!Oz6$|T=TNXe#I~=95k|eh-yB9z zsO)MmBQA7;%t|u1KHz`0JD2K145vWU@a!(=hf{z4!5>qBlSQL-{{T+rSo~`%6xSG= zF3Q}|DC_~>A6Ly}*EGpRNkt=1A3y4G@n6H!bUb4xF0$eonN+ij>5_YKRtehvFP1+= zWU}c#SP%eO)YLcNefB3FSNM|SnM>?+Fu#<+W^xi~-WgBG`Ayy3{{Va9w>^-N)Q4R_ zZ$i&+mfU0MJ|W@p=mC^b6uTYH^}TJ!?}|SV$aLV8M!G^UQrplM^xqj_pcwTi5m&(N zV*Z%FIV8)pYGjD3J@d0zvD@DqIZmUNXF9`@cN7H^W6<57z76O4lZjUZV92Oa7G_gc zRPA;*0Fl4*#fD2H@W%}7up^4K)<%{a@wY!wwi$;c-Gj-jX=gI3g#?Yq8=B|e4&?Gi z)Xq?jx{XUm%e4Y`ut^+oTjM;leZt})9!P-80_F5Z0WPWooz3h?zIm;_m`lVmjbT|9 zO4yPX#f!Z}4{fomIdFP|XyhdMnr#YH`hvDEjj*O~Dytr+5tbmGYU)m)M)q%O8*$DT z@wQGT6FZWVrjV2oQDiE`b!q@}L%22u`y6F+=}rt~SdTHG@B!=d$I&yHc=|CsZx@}l zIV4jGJPvQoxyCWFfuotug)=b#0B8$4cl+b1my&AFdI~~2w7=gmaB0277?7s zt4NIk$gDKAo7_-8D>yt6IfB3jBK7sJOXmx;a&$zFsNv;Dpz#@UxuniO!EEX@+aCP>_}->&3C>;CMqxN0t06af zO`XMTTJODlv6n6<$v;MHrixswqw>zVfL2?HoRG~`V0PMtUDm_j9NKy2Rhu!GGOH;N zsGz;5yZVYb+dqcr{0{AGR-270JOT=Q?l_0}%14`dQq5*hXI}DOD35CfW1G z@f_;8xm{M2hEO!wyC7-UlXdhsl)fa$tU}0Sm?p6x)=uWhuD}jHUbxnHjFB{H92G2O z2J%L|AZ&};uj2iOt`VOp2kGS{cwa(=QX?VFJ?y=Y(|b6$K6`8P`UNQ{lriS!%w=Vl zFQRuc0{T@!7kiZzM3ZD=+49H|7Lk@8DU>JyxgZ<6JKrBnY%-}DqDPcR5K81Rnm(9j z*e=I@JCHg0;`q>Q7iYZ`7@ zH8C3y1^ggYSUh&cu!0xYrUYpjKvt0HHG2S~f36&CyT?A5OF{!N46#wY=$r4i0|%2K zoO!zHnh-{Gp+YDtcPG~GeY*@j-*zcZETn%96U=|{;@DCKGB&RJ?grm1FPOYz+^mUfqq|SvT#6a{eC-ks4c)h`8IzJq6XDt|D?< 
z3!F@0%jwNXoi=D8W+6&$4R!?VHlfL`aBs@U`9R$#P`Z?qaLZTmF2L{yY-}?A7D06; z*lY}B8};1Q{QYq`3_+QtSLj}pTU$}N8~r%=_8DGN_1xX@PGRC2R zKtR8@r+jCAV~W_-i=Rdycss_iY-9aW3aS@igkJ-&8y)-L62T5yEVhWy$hx|wMFvMw z3fG_`Nj>Z7jem*cd{auSIXSDS50*;==>n?%0KZIBX1p3fSy>=y^AX;sJF4E~ZB@1N z>4rzh=Td*_7sKOef1yO+F)Ia-&E|EFRj!&M?{Tr>y>Qz^xgw6y^&!&C7L5cl5D7h) z-F*4m49Av``J+}5lR>We;fMNs@YY|#r_AN$iR6YglEj8FPEF{JTVbH90{#|F12_wCpD{Zl0a~({kWJMW_>Oib%iC{n#L)`kD2S1RFU0w4T zAjpd#P@#z=0b82?08TM44vIFG2-LKsNDYmJ3cbE$R~%s(az!T7FCIH+IOzChN;er& zULmZcjI0M?W3lIRy_{n_Lop^=c;=0mNQtFZC$+u78{h5e^2Wo*9aF=z9Z)0cylMzi z817oe+ntE}V<+N}O8E(YNRicDHZ~mXzV}!U23vCav~9|@MgmBh22!r#!;SzSKd6*(iB^#v*cVU*0lQy6%bZwbW0zD4a?lYY zDR3e9tI7ehUgwL)YI3fOwp!%SOO(nkQ!5+>DVJNwwp^95cd__S74Z+C{6;}E*@=uM zI7?}cAbOoO7wt#2;fVhLHJE|K@-wO;yn3Oqn{YSg$opZ4yf-${8Bv)_jVKA~4U_}1 z_WroS>#!_V42D`rOmTnQWMxk}i2S?nsI7BGsMht(;&A1lT)#&&^5wvctmT|5mean& zleX3x4Yz6rwjSNK!7{mdWy?aX)n(RS%{`$trO2{P6#yRl z3{p-=2Q-Q^lZP{wk1Er|up!o~p@R$9g46}?cylR{ftB7Sc@;=hB0AK-n)KjZ{PA&@ z@Z8RpDDpyqw8E_fdP41{!Cy~)_$+xzS^$$vn3tyKq=8dQ@OJ`-x6>5U_EN~b3am2a zWdkjqnHqwr5xo*NC;Y(r8sokpDo-49#yau|0g;v9)T&JaR5>IQT!1*mw0<1&LJDP! zC|boy97M6&$g*nt;xTw;VKYS_gY!hOaIsXb0JF}aw>y#dz;P?$eagx6PautQtC%pv zOo*`$k^Cx=jH2!^W*4drO{EBg@9C-DFPOa zLdTeqX@=!?qhK}f{+I?rA1 z6&^V9=B%0}nT6eoBKP#XX8!;vA2$YNGVn>)a~w$`h5`abu}Oq505%tfson|?4zQ!^Hg6OqzVfI3Dh%>k?0wjg)!ib*n| z^LeI*SmMe-3n?(GEi&!`nxWpu6mic2S#y31m(0(eStMq*NhX#0R>Fxzp|IYGJ7JNU zme3Q#;I_+b--~7P*@KpzQ#vyUfs#S9g)9MO8y`d1(B~7%XP}ljUnMe1N_7UDyBLFU zTW${9**>_=CS@Y=-FZ`%&Jmd!C5de{DsBj(d0U`9xKk&VlOvircwRx5l(bSP1x*%> zm9J{sP`bsb(`kh8Clt}V%1Ib#B!XqkvC`6XdU~Q-AdAzm1mF7MojJ11(Zic6R@?Iu zAY1h{TYK-uK5{&JIpJBgk+T_x%VqpURbd&1rj3O#?b~r!`Wj~7&Sm=fT*MC$hfsG{ zk@S@`PX1BO#CqY%RU2kpxSS=>i{<2R6s*h{DBKWA=}Ba5Kv*T1QS{u^`(dlhWr&U} z!nBsfVL4?}euSZ72K=3mt}18o9H-{qMaaCMD*24vRlPUm)TaLZ{+K_Dc&1+@9Kt7> zM^pwU%0!KB11P%Q^>51{V{lz>F8s33PnU)zcsWay%JwmFmcyy7q_Bu!d*B+K| zBX^!Svc%C)?Hp%gR`hIL1Kz6@{xjlve+`Q$dm|$1SQ6H&JvcHL<cvA^9?}hyW7o7E7 zlD?zaUrlk}So>fbGlhiZ%A+`Ln<-|~znNuHYtRIC`JY;>9vrg1TIGzdMHrzJXctl} zdfOlW0885v%VnK%$8hXmX$kby$hEhnE#JN@qsf`%a#5KI&>#|ZXj&gyG=5mml4%T# zPUK?HsUVI+Y$%>U4`D~X;QWa={S)|0Fu9)(fX7;$RC#TO2ivvdJ7dK<-reiQ`>49RK;5;v3&KsNsXY+FO&!Z#qqii>R= zWE*4LU2VsFJswM}K@fvcax3Ea_cNBN;g}kNHC`MX1XOi7IMODevv4q=ep}0KZ zJkC}oEHeK9sXCo@pqpEwJf%SO-np*F8n_Uw?GTVjDNv=7E-T^SR{(z4SHxqKMamRR{{Rh1V9Vtfs{K`E{YBIe zuGe~Y*jcOFoOC>F)hb~%Ye&qES42^wD-{hW*aJWTdv@Eu`eLs&lj+GF2J;{DDGV#y zkL8Pu>}DL(AsR(kDinahNWJ@B7CClE;wq@~NSfJIsDX7vSGTQi?T=NCPfzM&X>?FV ztsJrFWGp1pzK~BK3L>k2mH{48of`bAg0z9j6a{wU7I~=2T?(qmgzI&-9$VOagBT9^x){MUB?2naMS1n*z5`Qcfo{{V`~ z2>$@&qLN@T`kp?c807K*2IOu>wi59Lh3D}ZWen0g>Q_Zjwpy7|?hYaUxEJU^1W`WMaCAqQbboKmhH>*BSRR#t^g-Q6%aMq>-rZ-(9|#lsT8n zMU`O~R=F+iwE)0{`dRzo`C#W7GWw(G1IJe>3Jr2DT;_F>v>AXEed)I33I_1}KZxTI zX-wXAfEG|I7g&kbNMmPE6gMXJ?~EMzw|B}Ukci_1x0o@%J23!}%^&XY##h8W$(S1zC;T-#?0bRt#okNAV_+MuLy05SW3%P4Wt)&W z$IY2DIcOBLZxnM~YM_1h*#7{z!E#ysGD12^vONg(1&-7j*?v{dBaNrUf}Q21nt4q?~C zo(Q9I!>Gd7Qv>GLR(AmXcEzWQc=lbga8Kd6gr8K3o_RIUQ*?WteRsb2RI_}lIKWtv zmB|-0xkb_nJ8H5CEh7?d>^CRn&0yKz7D)0EM+B;RZ$nV9!hzg@^W04SX3uKvUrZ0dWy{8oDoGUm zR4c8aRdBaKPs)$K8{|e8+~b$$dAVaQDmpdz-aaO+vZg5V zthtt8(l8X-lUBrz=BvN*#G;=gmy8sIF&kovzbC#m!wQup2LAv*n72MY(gio-T>I)1xOrQsrHTcxNG7)ChCP$~--*d2tt^0) zjJP||9FLyd3&Oci4@~ThK6o)iW^gzb0T=SB+ZQ<~qRC`nWsA%;O6ozaWLO+_0C&c$ zxg(8MHL)>HU#p^jI=Dt&76-i9N(_p@c#gXW*C9cG!+E-y#Ah)yYGm_m2`oH z=vY=pb|6p|Hyhp99DlAgyvkB>_EhjmE?$B|mSr)lYD!3?EMTw}AB#KI$LEU}V*Mym zM3N?X2A-YUgXZLa@vr&9&`66gFscI~XQghLJnnS+eFh#mttKWhHd;!y>Y-Hww>*D7 z_`Nd>n;>4t<9OjtnTw}zdjVh`2dBv3V>OkJGZ2zI!^@e_ zM6VDvK!U`B-(kn{!zC5lVq)J(4?xiL)#ZUpJNe#KRo1+24Ud)t`GGS@3a(Est|`PJ 
zg@V)uz;ZkL?Tg4EXCp)zsg8g^D6g$X{$N{3`ukxY5Ld`U8%Xj*9C?rhjI}kB+;;=! zah_86X}j0#6k_s8MJ=A`Ll=h(#SW}so|BTJO5Vk`yX-#r22x40z96G4ti;GfF|jom ztJg%0AfM<5I1Ixvo0T$<#E&8Y)jC86C9c}u)q}6#; z6GPU%SmOCUPbEiV1`mX-_8Ms-%Rv%Lm?jbyNlvFhAp~1$E;cM~h`KmHJGAr3Bb15h zlrCmOQb_=8Hln?Ud%!bU=bv561Pbf~lI`S*IW`4sd6;e78s`<6?AjTnmJHcxA4;ro zM@wXj28s9Y#bXR%s`hV;TRY<(AqG>!=E1CixjT9f-xg9wB#~zp2|q?Mlcq;XaM0IoDYn};=-g*wowaUwRN=heUaNo@cDq%m`&|?Y(Fb?SFN$ znGBSTALMgyt=&|^nCAJrQ`pz~d82Nv4S*U6#l^4}SZu4Y`HM>8MkKBre9h}Z%5ySwd+pBGU!XF6h&N@jdM zJm!GaxLRReqMFD4zL=qLAWXtgSrbf&vER%WW4AiKwS&b4!oNBK{-xxZsIw76nPd{m zj#MgUPsR_IYV{UAcwfW>OFW4LDVB{h&(lFWYE>%YhX%ZY{+O!D_>bhea@sj{IeE~N zw)B+X{3uLy*6j- zL;}6Cw5Sogk1oT|bB|90&hkbPPeB!AEN-^}iQjTq`+nHzJy7T?vK~Pj8P;Zaq8eEU zARE{aZ*BqlpGFT>~NdUOg6Bq;Rmr%+xd;M!+DRKHu5Jwni}Qfq_U> zm#h?Q7rET>Yzp?mr!|Vm#!?bc_dOVTNl)f7q>k6jkv+1 zfxjuF@D&})in=T|_22jPtZV!S@P1=Eng>aW1W@3&#UhYE;PFTOeKF6?zy&vuRNUz5K&g6A zkVqoHzAtmP{=WERWc0X;QY?&H`1F!bK7+lFV~bew=;2i&XM#rrC~DWHhz9(?Y$&MO z>$$+BEds@992(~1bkPp;{$A< zF@gX@TOhZt`wJlb_@Rp+VG}=0ex{|h1Os~l3f*_xlj+7K6hcxNncdhFx+D$F)r$jS z4RS}e4NVl&DQEEG(`!>279cjgEXPkI-}>W`czAxO=(Hgad_-?{1%NvN&5kj0u)`$3 z5NRWA2UMEaG2-uT!|8qSSh4{X9yE`3LabP$wXi-$zb_|lKqO*0kf=FC%RJsh46mlO zFz$J!JYD0Nh)j~SM^rJDwJR`2V%80CNTGG!$EGx}rmLCe4(7mR~TE_4NrENl)pB) zG^el_UVOZ3mJFFO8o)Y*Cz6e*G`0rn*T^4E*a=D4Nf<`Ru`Y`Yi9C_WuO5Xn2lCEyODUr{weVE&1UkEz_Ef0bYyM;b=Z|LngCsMy;e0~$@3u5@h=+56fD!C zb($hdmm6CPqg>HD@5U(~E{~XtsUSjIb~04~xzq&%4u zwBFH+7thG!E1Z>btfoQIl0+I10zAL`i1Q8j=bU>#7bBEX_GQTworz@?bBm;~>OxpX zOPd0KDrjy9P&@wT68x$L1S7`jE5vmlnoDJubRhK?Rckj#o<8^OjncKTI`%^~3oKcnT~s7NZC>rQ0b37G<%9Su zjGSUp#y`*i>aaQ5{Rcar=ZAj~K_od>{L3tDEM$Q!frtf#8Y9@8C*k>3Xg^96k>4zq z*Bpgsk2=j6cbyDCXztg&_T1k0jPf96> z`SLJixpJ9pNF}VTF{p#3nSkbm$Pz{q zUiV~=dcDu5?~K_}g3{OQ4L7pO!?W(XNFkANvie_x-TWUVN~GO8)?; zSPRs5^ggHSiiSp>Ni{~YBDu2ClB-M!AXQxteXG+0G07A?Boap%SJLYp0@q^i=ZrGP z%b{&1wq43{*}RrqMU~L(rFy!Kpfz(>zdYez4a-NB&3<}VVt4B-kg9@C#Ma*R#qn4S zF$Z*p1FjTOpgfw3*z`BO{XMa5lFC#nnO$*+zz0-<$l~ap#P;{X=|MF8k=yAOzBS_6 zNg$RqiOiEJ8Wji{jRh3PU=8uw@kx-Le7;<)Xew7EXix^AMFH(~h9>+qo0dsy^2~|> zRBXbn`_Ui+MB?f{!P)8K4$?sx>Jkb%X`#g(RebNq*9nip#~vw_K2q%&BNTFGI?HA! 
z0$Kp7gRvrwaYSC=*4*AZ;qglpvo>C4M<9UY-^?D}$J7jJv%iFjbyXycm5hqkLvBZ) zuH<~N4-W7J%R9%Pc)(43KA>!J0o%~wt2}9fm!iYLB_<08b@r}uNBreKImhyvk0ADFQA5Z6vzl3CA%0}8GCX$S4GI@8} zw>7apYVvEFT(Qs^Tnwg3c;>=EcOOPol|amHhf%y;Vk!seorXU<|KIHFso=BxUismoMQ3P zg08=jrzG#EVlsHfTf*ai(i+({mQOa8CWp1xHU7BIWwK6l2`a|1KBOp(wvuQa@0=$l zWAho+Q0yT`V4#~@3Pz*yub%j~6RbI9%c>~trxvh`Xp4E<;i^jg}$0y6s#NVTr z%jKd08g@{uHX1AqUn|=V_>7Fv&Win8UyCH7L3q%@@?@ZAo*jffLO2HMfxh6HKDY@-jFt9UYoj|pQD&+0tk(o!wGS{3D_~7u zmyB2END+pjHns$vTF#nU;>PFei+>Ufz7?980TN~ksRw-us%yUc5sdeVIS5%>O9nL8 z{9!bn`w>ii{@CmI*Cr-zx)qUE{@l5CeGX(wDm7ll$ODiFBD<5o=N3L3jhxTSsTEOp z)E_E?x-WgrizkkIVy`*^WitIRq_Z$$H!9Y$16{7z4`GX(mQ_Yatno%Anr6^}fzqm{ zf-it}1JL?jHfCx#>p{%rK4N~PGVI+y6Pn0qu={mRWe8hb*RpLb=iSxOFir*Ne>1WFX5%ssySYK`0GmP@)N; zSWk~PDgJ(RGD#?vIc8MKN0)EWA*Bte2(LR6&if6!bAe{_Jakn-iA#Z~*|p=pt_`G1 z;EOzQnH4h=*(b4+%06{}d~#?YX(Z7YRSLsXa!;xHe=IIdERFry)`mBe8Wo2$B4-gP z>BSU*bSX7Olx%%#*Vh(VJi6C!0;NYrEKUwGV2%4>RG>rb9BaBQ&N300UM* z8{Kne>%SXwfJc^+BH5I*N1jKZ$_NA@;_Pfj{9{VZ7qgw0!k0+V<>rB(M8Y^CiOrsw za#evF?Pr0vwmA$Y9K7i$Nal_vEdyo3lc?XC=#{;|`R>#8{ieFOGFk#F1bTa87^315k2pW_|OYe4g9936* zT0xgdr-L&lQbo$dsLqceYi)qvRaM^K`i}TlHIIK+jBIs%m`K}P0|-1fc3 zBbm&$f~HAb*?}yFLi`|YM%Z6r-wR3yQKXR~5tvU*2#S^h>&v*i?eBmP4vNz|Yt{hr zS8;yUeR~{pc&R%#@ujg-m?QGVU^%I@sYjA9v}vpWNT4hY?CpKAQJX$Ux~x@tXe4D+ z*t3vE&A!2QKDe;UD0$Ro;Ygh#k&#&o8zGHsED&}dOTtazZt+1BkuF^%n%l*=Y&Rxkw!RXL`hqPGUkpX}jzT3;%6 zf9zL#Gm3}vIbm!x$!O+4c2Miw*JIDFAB!_9_=HNufR0pI0+FS5bsi(5M+~l6X6MY(T}YhNu31Oc;8s=V(Z4|l6>1O7gov!h_f+cN<6pfZ{@Z!IoFYYEC>%QODj>_?x2Ic@mILov6IJ( zX*LwJ=o95Z%4Q*sQsv(&stKj@hv#B;?_T`kv&8ce&zzoRapnPlSqoOJ%9ia-{V`|a zQZSz~B9KPx=-@H{w!jNMpW-udw)KipA|cC9BF8GKB8bD0N|m#0c?5Cjan;2gTLtNl z=h8Ouj}R*xN{2BkH<)ay#i(z%qBrgbzButnX`rbI+^Ew)7Ta$83_~!Iv*r$HGUkd% zk{LXfLg}IIRY3PW?BLQdngp^FQzI&ZO&TKX?rxZh_3wh6+GdPSqR=QK8Hm7^B$3ew zwrVN8f(7mUTyxHf-LRE7s;?cm_!#OiU?pyI)}>kJMD%< zlnkl~b*3b#0k_mH&u}l_gN%}P_tG?_OI?zJ1zT$>LTS=2wp!frz5OwJ;ocuJ^b;(F z+{xv#4&Z`2_r!8O82`_NEtZOPI*;@iUVGUZT4dT@e5&Cmg8P||e)b-ng0Ldo9s8BYt3^p#=> zGc7KtiC_t(unywx#=`l>j?3j_%PW&LG@_{)Uk~sqcVQ%s(UEARvor2lu7C#HzQXtR z)@3q~q1IjEZCX`>2?1>(m~%u>yWXgii)@dEEPY7QNn?-&MM&ZTw8nz@I;xMR>U}Y1 z9#OL^9;KI{&$x;<)U4Z@VZP*PJ@=~L88S_`vXN01W;jGs#x&(#XGI{RqYy| zSR-sLPo-kjqG)1lzfB>T1WOnN*b+dtZG2bhliy*@VsS%zfjnP&H^-M?oV7)yQ5>>m{{SwT ziMl;m6hKUfrKmR!2?|B(6fcjVBSX`REtQTCrZSh8%o3Ib0cPuR=8fO3_8HJ z!b-C=f0Z(@t71Mee-Yw_tKPT4r_0!7o;<>YkcW}6L?k+IaGy+2d3jdHFu~1d~6Bl*S0N7K+v3d7bGAc#)CsBO6{m9 z-0ET|+1q1$;Lzo45{T_U$sG}>QB@6GUzC%6+~UG4qeU!wV`xU9tH{h~Q6$#YZUy$= z<$~p4X%&$pQJ5+sNpzAFX&jF!8!6X}yB8v?*wTZcGU_S}0XZ%wy?Osa+MS ziRE`7SOg8P88086EQVNqb2QO9W+nm^bO{}?00U)+8n3l*1~mDS1IF?~^S|Z*)a}0#Nc8061^9*>#$&|h{FkE zm(ObjbVD3@g*r`+Mv^OIW1M4rW_;#zFA>YitjQ9ty-Q}W01)S-&Ul`d zN#r-X>VL+>+godq39!@b3+iJkf|_k%$_L zy157kdhf@R{7Cs^mtNR4&LhAKShf{6eIPF@+)}c2;w%W<03?j@U~*Gy@{> z=z)tNZF&jSVzxSX#bRyaiV-#Qwr9LemD4O=Ug3l56<_f{VSJoDNpkQkmL-{DbOfDA zT4)>eX8K}ziQ<)Vs8PE83Q`x6eQ(yP{&*f*MQqMdO`u8DWK#Alwci_TD!0dS3C1P~ zK2j5zTk>&<7TQ)*{?Psn;>vy_!VLV`vI zTG1a-zsS}L(E==_Y$hg5>JZ8T>ZBAe&@~b+fH%$@rFCF9c30)p%N%OzI*bXT4U>9k zZ_eCSE<88H6T_oio_sl2U&){XD6@TtwcKF{(Q?_yi^n2IqBn3PC7HGi;iwzCHD6QC z1mT(DX|)yVgkaZ18Yal!a9fN?N1mTVaa4T{4;ITD!^4@AJ04zTWNBf`D}^G7eLAdi z1&{|l#bNkz9t34}{O5{J6+Rcq#u&E1fkbm$;}7AAxs0N?A}0l+Z#`2=JJ)xpNL)eA0>mw8Sd;BAA&djw!FPv0nv=iI6q`+CU}0Z%_cU9H84_d-{I( zl%IzHT_!>l4JBSO#CNYK=j(}&8BEuVWJHj{VwvO9AfixF-(yq`#~UQ@sO3`5;&~aY zgt9^TZClpI9OvXU+W!DjJU#FGFZMEx_;*vgvW+@fv@9G@?8eE%(Rd6IKBj;duyzO- z*C%j!#oef55qM(-HtCh}{RqDGf##-*qQP@519a%HdmL4o03GOkagtt4=KB2prkHri zEx+{t0AV;h7a|tlP*|AXoegcq>Tt$X_)08MX>G=mT@uN&w!_!uwljytOr|$(rkz$J 
zC&~%p_8)R@5$c@YY#gh3VJbR+(%pLRxuf5V2Mm+GPKW;h|e72rZWE8ti!-{`HMGDl7EtRPOf51n`*h@qrZ^b<)jcse=*x$-(?x0Ln!Y z(-v#eM%;OUKP+9(CQBqB6U!lHi{k0tp=ZtIARcjFn9D^xvMhg?YT30as9m<)x!i@^ z4l%AdEvCe@-I4Iz#GN^$%W4dj7GLfoD9@_-F(L+`}Q{$e3C@_u2=A&WEoDp-+q z#^R0x9O=}XEdm_WZXH2K&~3H^w{cyojB-O>7}Cv17zjZE%8h4oIp7oa^~RXNN!wds zH%f%Feju{KxK;;V2yfhsF$X@y_U8H!24sP9yt0Fwp4yuqky<}rbTjBW7e-K zk6;ex>*Lfq4f&Bi!oizf=E1i``|f`4v+Ggc_b6RS%b)^K$3~(j=*Zu zWcC+WhmV-ghe7(pJG6+vHq;P{HP1g`fSmp(6oy4rho8iuJ{<5o>p3#&V5_N}REh+8 zlVi13Gk!5T$tGi0NmU3M2s>R7sDrVp_TRQNz8{y%80UHN<4%C`D=vVlN&r91H#9vy zn4|GMGGsFuq57GrGp_R&LqUlnM?6xgA3Qh3j8vU^D=qQaUy}lCn9HoK5ozY#TBr#L z{$OwIg9sO)E4RVa(tO)BayRw%_4mR501CjF*^0S9f=RUv_FE*Mo!$tM6BNxd{)%c~ zw?F{FRY3cE==x(OY1Uas?shRsl06iQ9+qiMmU7?viW~g)C*}?z7%7%#m!@VxAl0U< z7p3+76S+S4my%a5Lq{4bru0k$VpR0Djko^WzZ`rpj#TmsV{xg)XlfQl0&(w&38t8<%68 z((J&Tzfe-$C6T;gAqKVRP@4vP+ zd{m=G3F6(4$1K^2q;nwAFbiz;q~APm^Tm`z8h2R>YC3GQaa>;80DJLxo?0b>W{i<8 zg365SKzgxbNj1JbEN-m^rwuNG50Fr*1CB`t9BIu>(X$kID;Ano%;u3Jtx`zJZLWr{ zMONI{;;?8_T9;iiO0^tirBu`c@Ic%GIQif)yF)5AV1Xz}Wmwttld!Aq4c6E!vNm0G zqJ|Bn74;pjZsNsQA2E0rbdn?4)riG9xYDfNR{#HwCaNxHB-_N-G+E6JEh-Z+_>Rd-&gBn@>{V9~HS>>Ia8AP?2bWwNr2xXjxUk?A*HfY=vj?SkeqF^S?_>5e&49;kT^>%QB1 z?RXYXJC~0nwd=bk|lW=S)&)1u%scs;q)i%jP12L zBU&k?%(CVTuF=K}MHZN`9e@>QfO+-z#dnBGI-V&!B?}}f6|`1?V#wdz)!yGM1ZI{@ z?pYb6M?qSdHzwP1E1Ew1;YZC(ka(ptCSrJjkmv-ENF-NnyW^pc9XtJidQ?&eX>_WR zKTM+vvOyq$?`F347_R)foZO04m=}qNP9hxaJ z`G=LGY9XIlHFQAW9jK6N9e!#$Mt&{qcPz5W)rLeApst-i!nfM1U~yOYf+dn%#D!F_ zHk9LH03QDUZ+u_)T#5O%A~80;pjOvQWp3}&bA(M2;`Dy6j+JJ$Y=Piw_ab1Bj)SxiP) zwd!{uc|qKdQbiA*Ej%hWZ2Ve0q8L%+XzylUEdlC6-EFz|$4e}egJL7y7J1n^y*bJ< zz|ct;1O}>PRlWAB-mzBjMwyt5dRQl1pDK#hO7VVx9sO}@nS-T}$wtapJ7!cWa@Na3 z-+}hRGqmv#VvSMN!7LZJ-0fRzF@_RxNkr4ZJ+m$1Fg9OAXss<}{J}^UdsnXvF9KY< zW;{XTGOY2O=zz)=!>cz;FdfBF>~|P1iaN{UQXxb|Ssem~(p62B-nr9T{II5Kb#a6uG(4gyZt z=p>b$iYATa(bU$jq9U78~ww z()SyGENc=ZIR?;?qSC#q7A3#~=xF_M%JQ?Q=^Y$6o7jKDc|`FK5>Yt-ND8@YpnF-~ ztcv!>=s&|*I6o>J@1@@UGCxprnQ2sNq5 zqBtRW$J+k@4`q|`P&i>6&l-~2<)GDLi3KPI&m7~!e-X~IPM4#isTwEVr!dB}omq8kVSWV*uK0ze0TnVW1c#hpBmu{{Y1THnXEg z8bS&r{38;|bwTL^5X;7`r2>#ND-onA*Z{6**x+r>8=3;hVv!lj$V-`IwHIq(5%B=u zw`<3i7~m2H)QH;@W@`h=2r5N@3H{Hm7be+g1hALOq-B-*WeRGup^36?_aNyXQ-zLP z&lqBi9##w%BBYXih_CZ*clO1Mts!z_)fuK^=G-X+F=PBAbgyrg0Ae$`4d`KofW;(|Mp=r;V+Ea+6-X=yvI()K_B;IW z3SNt712f98%PLO_O&*{`fdOrcR?S-|Aag(yxWQo&$f_8;h14rL%GN-!rHR;BTcg;T z!?1~@c_&>A6qZ#2)rn9TYd3aFZfmyJzC7-+p_W->>5`p7$zfDBT%;|r6wMWVYm z5=_S~mg=&|hdL(-s5)OsN)I*cwWJDPQHreO;%wB~%EYqiOv6a>0lHZpVxaB4e7;z{ zo;J+L^OO0g^9rSbM@aRm?Ai^2MfW21J60$&^BA2fHkJ^vKUImNq}e4!Z9og-o$+?r zWg(0 z>A3BWpi3!>Gfr~rERwNs$j(S3YEo_ zKNQ-M4k-9^$qsWlG0SD6WQYc2>lF>=+Pt(jBoDqTkEPEK+%O{-klpb*hqeQ2>tH9(XuirV`~PIdzh0E#FsFhzM5f5;v{AImfSt zZAGrXa1?gT#F^%GLPbceOwf2fuLN(fkPlAiB=%pngM-w@7oqXg~WNqm`^^XkSeROW-5bOy|&-)jk!3ydS633 zw8VUCF(gt-7CwYm(U1(q*$f+iH|G_eAugW}%h_~4mPS=(x?@7PA3sbn<1DS0Mz%+q zX1WZ<^>^QWw#7$_W7Rg znFjj`?YB5KRfJN)>#9AqY9xXv5#=711&4Uvcyc6Adk$Z^nf<8#Zv%# zZZUF#$gkL$sy3olXxY*xmMu!5v<3kD&p$rcXDOTle6G77olYnLTLatB55K-Da?R70 zXk!mEBtl5W=SgitacWNc?R#S1!p>hYK3W+~+<-YHfk02wanC1?amt){I~6FoCXHX= zl+~ZemYb%lGJ_|Wd?QiftJCR^szvyZD?d;}CSjT;)JQD8#Oya8JbY6sx*`oM(G6>I ztYC`lYm>hpTzx0OynZEuGR@MCGyx()$r>M--Hyj|U)LNjA_`Km(fJ|wFJC>Dk|tr4 zaUs|=u1H?naCj&0gGmSG5e8W$T)w4Zt1h8eTzBK^irlx3X0sXJ%%kQ}X8!;PxjKD$ zt;zNFtX)Ooem0C#^V%Z{bby>)bLADjGdM;rl$Z4>PbY3K`V{l{b7l~k(ja3xspJIR z)z$sDA6zGs@YymcBu-4KNN~zE8X|xScr<>v)pfJds2Pm=22ubZZC`!P1^)nNhC$Sx zOuUxFgCXhB^&9MK655X&U(~*7yK!b?Gn82dn6jpJqta0@L1Aw9=E&!o7*${?I~fD$ z>yPk=RaGMBF#~(=jdn^H@{%-TJCues8Un@UYqCkUJDxYH_QcyE;pHAfo$E+J)UO_) 
z$)?*#-1>3pg|8*YO%CB1KFlLcK5k7jMbd&Wt!TbLnug7M555hVf+=%RIhqcX<(@IB zhnV?&Zh=0X_TL)M`y0*EktNFXT8b}F>DbvVK<&RC{{TEwH-}{4m0n5m@Sv5bD^UPm z`g`s-9kA%;QtMxc8A871b#hSV8f61DEA^e6uF0uOQt$_@5qNPt9i0NfqE%@WD#lBg zhc-#>F8=_gEOL2lpfUhs4LisZNbLoDw9p>gp8mq;FFaX(m@&zkR4&$hp+OsP00aSB zV_fsNznAkumOrVOo5S81v^t69nnBh`RLImg2Z2Mm+qXFCMK4AIGNUN60G&pqk%1uZ zX6$KQxZL9Z05{^^#{INlo@fb4_LaE{LM;}?!%IwskRG)jM zzo^6UMOJzV6|$0MWat)cBm^KiD@0hkss_2gOT}h<4BXi)ymAIb%rdHg zRMQo^-=8abP_JBQd}BV@zZ6ExBT4y7js$(^6X!HNS%$-$SIv=JvgIEzL~}(WNi98; zS~s;-?rd|o#{6?}Nz1wyNxF&5{$neW%I4lG)~*R{AhY*7{Hwkyvz{Ds{{V(0kwR*c zVHn<+ot%^EJjRb|#HBBt&E_M7sLG=jy(FuI?m^qoSH3%(%+Hp{W#1tqoZeYM>KH@+ z0P0N;K;-`bb&WV0gQoi{B69vkcj0&ulf-8-evDbBjwuyL)GV9!BoX(+dC9t2c#+6W zO4h}_?1S~+e@s?*Jj)-2W?}@p&zh1IsR+^)Q++=x-|34vE^ujMLaN0;HYt<}^cO_q zCT0Hso>F}sNH6?xp(%3`Ni9Nh;pSFFe~1>48uM~C9+)iMUYf@fR9_aSqd?t~exUZ; zU@^rY%wv2z+lEJ7q$!A3EmW47Xf+l|Tk;4?loL!kY#rBx%2 z1a~hYg(Y4asq7hgbA6g;MB>w;rQ0%73KhN!pZ2oKWC7q)T+I+weMX223nh9B6J#xCnD{iHC zz5d)_v0F|13T-}(FNbEjk}O5FX5%b8(N~bMvOPfWi@CCE6vdznz&Kqivs#dSKDT_> z^u{@JGi7sPWQm5T*$QiKP-|~mvyZ0a^6>b^Ml94VB&Ki}0TX}?`GuC#w-@b>f0O?J zjj4KNev>ufITp-Kn9LYse86=J8mKmQKM))i7tipW@Z7;4F{#R|Fw;D`z9v(+40~>P z_SoXV{4!q*6Cl=vYK=<{2|S-bII;J{4qbCOsRM|aa_q}0)w>Z+qncZ=K9%o`-w{dj zuo`q!&EfE5b0TTj$vn)mXfC(OHD7O*=MMO8P2|fl=_5B?#rAPzn!WuGI0NM;moWoO zk~XN!l08)>mmn6t4v;_Fi(Icx2(_6TFsg;Yc;p(j09Q}|*5?B`w+`&M#+jO%!&AdE z8K;S*R+bi6im754>_*$!Bk5Rq-2Ih)8zvj^(PjVxtJunA^Ux%c(`v1Om|#q$P{AeJ(unK!#9k}r*q9=M{XrQ=&A0Qu*!FzE1Eg*iX%-ew{evrB;`kxC-(j{iIm&-6mmFzq?Ee6%;Sd>PhLsWdUgmuBmInX{G96=I(RW& z&mWN!IYTSYx*5|hsX0fM%6DdKMNypG~K^S>wnWlFs&=Jc)_zFvc zLsH%9t@h_0Hgn|LFDGN!e6)@!7De2nXmAvZ-mF;$uaU>D8eE@GJ10pCxM1iSJ=_)5 zdjh+%&>fBJZ)nk2I)I55&WDwmRhWhY%0m;u8_)p#aQu3fLSva%Tt`D{)pewSYyi{= z;)M@d!mik$Ic&SE0rK-mh6-mfW@T zzWmT5<%s2C53BOGFQ_5P1TBkckROhH{SLebB2E~ z#z`hKun6jGIE>Ldn)>cQ?ef6iC?X6VITAHMr2>)_LHKwWgS?03fuh{aYwu1VCI!#a!HR^!~!Bjs4NAGuZ{hfC&ypb;~qk_6Of6`)L;8?`%C zd*QhSb<0aCjTCjnJj|pUG{J99k^PJ=?Hk6=5mb?ggdUh!m3CyY-*R@`{`h(^B$+qn zL}Ub93<(w%gMQb>_yye-!MZP1WX-^$HcC@2nJhI8Km+knq6ze@5SimL#;eki?Fdy4 ztK{H=s>a8UmpC)ANm-qA{7Os6be;#LS0BUo!qSl(C4ce}7_OGC{-U=#F|${o) zT2I%~WKpQ@6}1h{#MYp0w!R6%;Ut-)iUpUTBqO3?48-|sU`Y#f)Yq{UfPIipFo4eD zIT}35u@M*4ZBJ@GYC8}wp7wE`@scwP43fpu6#}Zf!Mwwgq>ZnUM`7)~>&|5AqDgEO zx`IGjm^fP2NMK71jkYRh%*r}l9KwEJl~h(5+fB7uBZJQux6c*VK@=I}EgKoUKp>Og z5nD;3&0UfR-pK%cw#Ux=XEM`f<&ku&GZal{Nz`4Ar5Z0{cs=j6V|tRL(yC=xD~(!0 z<|Gb;49XD(n9D2*XDU?-!>z)S3pS%fY-pX&9-Hm)*D3KB zl^DOpJ}Mh6rZ}`p!%VEQGxY}cZnxi2B>Li?A(T0cyr{1IQaX|@?MMhB?_fNmd+&z) zN5Zo?m5NC+g=1|&9L1%iAQEcW?P7^OyJEi~mXd6vNtPuXs5ZM*ohQEMoO--Y5yerE z4L)5JIk`mW;vYDcZle;!((o+&_V0Jw7aj==IXt;ln%U5Q3z~hGjY8{D_QIKrnq?kL zxytn0;zn_77J}6p?|%E7Ka%{w@m!8xIdb`Q4&6tS%B*g^L zyT@cmV9SW*i7znaSjSYl$nV%!7|M8@D03uCnobxQto0S5a z%mQo6b{?sF4=LLFVA)1Fhgh08%E=&LMcUXpS1;Jx{+pv z!O8wuEC|s^5T2h~qKy)ZYSy-Ej^DOAa+Ge2aZZpp*ya=jZlqu_D0|;;zW)GBU*-Ha zGtapQfnJD%{l=1_^0c_VaXb_@cB(XxtKy|0b?VwLc*Tt}>+X3c#-Gr!$4drj zH%(~Ik`e5NPd3jGRCVh_p;bt|h+V)N4nDST7a1=QJwD|*^sl0>Svrxvog|x5+x)(mCxqvls~k>UUFQm@12%!AdLL3ndwb#8FsUZdmrbV6 zs$_g_W0#FGu|)d~N>!ZmLdP5ti!||k*pD==nvGHoB~~1)R@>2_eJ!z*{5|-pW0jID zlH}~7gR8@0E>xP_k$=T^z3qo&kxNUMjuf6vOsS$ZRjMOlRowbx&QA{+H6{7{(B~K5 zqg1(%81ch1jKj1`De1Fj<4u9w&^uV|-tjE-GD^fOj&rXmg(2$lXjL9B-?lCM8MASC zR$dPVhx1Jm9;Zy@-hn2NjU}$ZA3eP=u2;i6V6sV@@PG_tnTBCwcVcSDPRHd(Vg@kl zourZpN=iIf@_0Pie7fc`#WS-i0)ROio+^iIc_M7uu2wv>KbV<llhJICJU_?X1gW1}Rfk|F~_rP8`pz#Ho9FAfjva1~sgFte0lfUg}wi6`#1{rTn zj^uv@=H$$Xrp>^zpF2Vw16TuM1(Ckno^g%wEY5N-5QZ$oi#sHOBP3#oHY_guUah!Q(S{pp&L2bc|cAhO$M0XK&#@ z%N`$xX3gf8s-*eCfQ`sIozCap{{Y_v@jt^k;o<5$GB4AuECcQsfo;CmUSE*Iaja2E 
zN$=_hDLHGQ4<4DN%EYL#vg{OV1Oq^wOWSY-4e?)<1}Sqj85-R6%{an=#)Sxx;C=0p+zoEegfHMYp`g#Gfh|~jVq4e+e#;9h;2=ToGI7&u$ z!}87`o^&8j23jGtu-&}uf7Zt%2xhZ#_=a9+;zeMyj--)+rEN|7{{Ro#{c*JM4+SW> ziL#|K{Y5$fVWk(>PX?%0%Ncy?=6p4?i{dPFc}OGhwvkC#)c`>2=PX};E&Trgo-0-J z-(x@K316Jaexs5eUhVq80>f1&=5O>urfUde%0>9G*q8r8J`L z6g~_`O+|&39Ck+0scc5k)2STN=%YCjy9FWRnQ;~ce8|i3LL!o9NCfdzvg_j z)pV;{-R(#3h8B2W&CFrUr17d#ItvEBg>Tbs{XwiN$oaBKBdL#2N0hQmuM~)j3JC#Y zuyV{akEQ&uw0Yv0Ow#4$XWd#dtQb{>+zak&{V+Vzyr}aPGCGw|qCrYjY9xB@58n`z zD7jp|X;l@Bl4(+pLy~{c;xwOSadk)Oym2q3Wp+4Avs?|{m=DnLe+2k{w{ zM%tMI!y1GJ{N3n(eep+@GREjhc!KT;Aw_)qYuU z?c3iOyo^gGJ`e01UAr}TKg9ze{VDPa@iVMaH1id8EZdRI8V73rA&uAN14A@@2=Yqt znQWjWX(6Z+LGRRcD zGL$0h4JV4fb6DAiZ7Pu@R!=x~nPinsHo2T`uO^NFQ^M{CUx_MaU&Fvs+nBoVUaHNtpHBdX70cz z*kho$@zd;28)cY?IV^OEHKLQJ{4hWZcU64h9}vt&OU0BD824Zfou~lgZZ`ww2eufR zO%E%`jAeRY5>GH-JJ{d9*A*TwGAxXZAR;uhEaOg)H#B(M?oT*YH0L8<1eKL=<_=nP zn=vZ4P1tnFI$K@x2Lo#Pk&Mrc=he~^mzP>dj+)s>BniNWfLSsyb4O{ z)44lX2e%hqhu;*_7tceIUa9DmnL!og0l>Q)>_FeQddI5qd_@<^U7YOs{UA_0xtZ9@ zsi85ly1SLsx=A3g-nJi6zS!hwLc&7zGyoYLe~VLXK<)_ANcT8)AtS0o0c4E>YS7;q zttN==zt`o0%MOZ%F5PXA0IQ^(R%?NL)t^CIW1%_Lk&-a>&SYYOIao_4l1)&v9f=Gm zHJ&`$dE=hg_F7)9WDN~my0JRbENXkK3oN< zVqGMb0O~KYorkC-dgI9X{zFf*y^1uF3E;jIWs@|6shTdL0$O9{9~OmVQ>dD~M2-&F zS_ny>B{pX<66KdRv!$1i5v+pW5F{NS-C42QojyY--Z=y(D#+0P0NX=7XF81fP=`{X zTYCf%+!1yF#?$c6;f$i@ygM`E5Tx0Dta2_~M>MaPKx*<$j#j*G`d|PS_aPS>cd2mepZ|Y|IF10*@w`pD5z+RA}VUBsCnKWoC>OG4H;k zN(r<(9w>LLMV=`{vdCGVTQe|=W2P!1)bZ>N+jqfZ50zDpP*u!MjwEnZ&cuVrMe{KP z3pet?CqM>ltWOq&%ifW^QtDCGVX3H<_?Vt=lVjd;v}rgxjHQ{v`Ne|h#DEy=Z5xg1 zyW(&bQ5cxW;lyLj0P{(@k^wXr)o;yhfUJ!yU3i*hkd|(vIEP!7AgDG7Z8Uurx4SrM zRtQM~>6cq7BOuo-(48Sp$9+I?$s)GEfpz}( zR3T+iwQM}ir1n32L!=lX=*HTm5xJnj0N%%A#~a=9YZqD|v-LAF#=-){kQQ zsL{hq0bvOdw^ymd1Mw5wsrqBB5LaDmBR-$h63WbV(A_~cK>>S!Ho*gFGKnSvLV_ZC z&bH)J5~tFr@qm}=Ep#!fsVh7$MfOmv11gioh89PE+X?1j{Ye#3loazztRrsR4Tw>& zCu_g1CeDtmY6V6bSpqj9yAfyC=s~*t^+9eCT3K z#Ev?WJ-bL-Lm3Q*mJ3AEi?40B#eR7c%##!mnMixgYNEP9a-daM6l`x>3pluRxYgw< z$XDZ7)YKN8iRrIYe)wm+g+tl zO=%>N>=urq)<>IiqWQ0C$IEUIPmAPOH2`-}ABY*S~%F|bFKk=a$08Q)5`W*%B6p54u3(qW6bBj`}( zR&4%aMCp}(D?XAGd(ms!$=lY>4-RfDzJ527NFskY1tetdMc6x%F0j5-H|n{}1ZG`6 zStL$Q{Bw!KUpbwI2xZqR6QJssP+&(l5gBrN<9LBXy zv2Co-qE7((V?5ae&8$(t{cOq~JRmbTtMik<9^J9J%lKRwf+g`h%SoiMXrXpcx_AYR zSv%bizBS$%;JL38(wk=rm{8Tei0 znZG?GIe0!O)Jv(pFN6!O3GPp}Eqo<>Bf+wHjMBkAVcK0fpaMb5n&urJcj5{4#VG``}$$A_(#B%Wq8!jBq3Bxh+9b31<~8nw)geNKPT}B z&9x@l`wor{Js5-du0ZekM~)>{1w<~|km=K4k+*&O)&!ZX+_1BzSH!c86eWW^fl8|r zPAi@c=bEnA`Yu@bKZYc-O_$1KnoP{0Q!zOtFYuNKLpK-B)B--uO(eW7|)0IF_Hcq@#)NPWb!$P;uAE&7eC>(RqtNMfq`bf z1^Dtp-<(g%CF)c~R~}=*BFOK_{1MI zEB2?&{s{3MQXq~OjGaMOFs-Q8!=3gdRy29<5b;4fNhDH)If9R`Fg^kj#Syrmdwb%( zT*rzdy0$^`?D7R*5MH%Al4_45)8&q?EWSej05bmH?q-sKh4Bvv%;ltgaY!6Q01Srz z0K)(hdmcS-8Kl)Sw6Qd3Cz3!=9RtV+9PzLpE-|Ccc)y7xGS4(~$1-%)EQLD_B86@^ z+t=lah5WNFjKa2<97OC5?il%-wQ>97w~ljhxi9Epl8SW@Q07AB^0e`*tmxX9Xw#&M zQfk2GZ$CdGfaLX$!Oa`V=q#Y!k&h!cHai5dJM2Q;Y^JJ+eL_z}%bh~r> zT0s5qq;Dvd`p!(T>y^zC0|hP)+Ztj8{{TUWJUG0F)BgaJPIJcW`gC)_m}Jam0+yi4 zKvA;`6mIU1(y^d}@sEjRBt;pvpDa`mlQPTdJiBf3^~c8iH^s+K`#@as@Niz9Y%K}zFSkmvwJ^26;>B&3RIDSWiEKllss{V14$+dE?;fwfXlGJQ}pk zo?A61JRh)y8F_Nv=TRxW)oc7MsIm>OtZ; znRDp&^R{KBDf~WSNd|`A-uO#9noPV?$CsE1ql%;rAuh~Mmrti70UQ#@?Q8iFK}J6@ z^`Vi8i+;3dtVppV8aq;*hd0k}wj*r{<^)-7dLKwKFaX#$e!FAm9s&5vJDO&Jb1dC6 zpfMg*qgKZE;Qf!bG%|RMz%$=QI1oEAl>Y#TIUb(BJPhzSJ~=xnrx$U0H5u6Ymub*Y zrkOn2jSfJo75(pyOAs-N1lXajTLJ23t(E~pC}?~i}_ z!vvBhMj3ju4V`2@UG9nc58oMo!=)-iek}Oe?BscO%&;{2qgV42LviK4=Jw;iwlf*1 zMs|6BITH-3GQ3FA1M9jt8fK;u~*`L4Ux$~snjrvA=CxvUZ6SL z-s2r#6U&O4yRkWOIWI@ZyeBdG8Mw@i(`krg2ji*$i|x+)af=YyVoXu4gaJ1!x~_(m 
z`Co3pb~vfwr3W*Q-stY`c@Fap*5&?}B)w zDKdqDrHz|qQ74$~wf6r2j2LKV5xI~u!pct7A04ZD-*0-paOP3kNSx)+5~|t-8wz&5 zH{ZW+d<>-3Ndn@!_Ga^WNn@O;Dpi+IvKwMI7hi9_2_J(8%;jU10!D-xBp{Hh!8cuj z@7&`-;u&y|Nfu$sJGkl{WVLg+A3Q0an=b_Gnlf%91(3$X5u{L~@qfMwIprwQDdS3z zxJYK8vDivR zcQ(V5>9zjD6V1gGEdwhLBv;cRw2wcpz1MCqNxq$vjTD*5q5Oqvk(oTLZbe;@UNW(h|!AS7*H!_PZiarNDJ7vmhxW>|9Bf;z04 zVsI8cP6dGEP_saPOk+iUXRb=OPBmiMMXP8A^xe>{{{Y_{opQNO zRn?@a6@klnDChZPN7d2vEZ>Nvft7QSx-^JO7FMfM%?^ z49>t63~Yi3s?%F-^}v_J;7q6H{8eQ#W(;f+D?LXl)D#i!N&4Y=d~-Cis*IDzRzX>i zACI4e3f}!O=i`ojM^*P;hB*BNKP?tcW$4EkfIuTxl?;N#o@m)7zPy|YLa{(zN0ns< zNqFO+6sfzR!Q5BN4JLcTyhVpcF)C(tks8VYRwT6=0*S9qJL1bLlM+5|GFzyH!?%?8 z*w6s`3|}V)^L8APXvjsCnt5eeU}R#-fVC%-17w=9V~h(q59&G05MpFxTDBw+Y6-2k zqm7Ph!KF!yWjc(36sThRcHHrQ=y6G!$ghUL>^hRlQ8fXjEs4D#9qRVt@ceny_L)jF z%+HBtBuvrFM65%`RY)5ZqgV6v$IrZb#os%b{HzigA_ASk2U))UgSXIP^YLGcB2MtU zg=H#q`G7I2e+?+Bqetb8W5Qwwrrum&>4*IAU-VGXiuQ ztH0X@mGmma7!ZmL6GVV-pHHqkGDnh3(^&h(vqQe>;-R(f8iKomN;iz#9x<| zC8c#@PVd}txcc2=>fRX?azmDt^=Ff+k;EpIV}iC;3QGg=rqpcR0nPm1&dlYI?!!oC ze+SEC+5At!b0S>ibvwl5$*dLqMqrdziZ*e7l!Nk_co-z3D;q$JhEX1zpoyfSB$LRj z%r2+^4N=pC=J3q^c~TCn&6dsFjS^+#lTxehS@kvX*aCOp0gZ-NBCr78msm41y2e^h z4=7R)tP1J)h_beQD)`3^QwhCoiwwMZOvYMM0|#EAMa!%*iKMkEBL$d+3#^u6wl}*3 z2Tocg)Eg<5lQ1=}mQaAU>{gLj5&^xBAnjObmpC(UyY)t9IHL}kawSljq^(7&hX;CN z=I4A425yT%6w0m#Oe?|Hc<3#s_Z2f_z`mKL-s#t|on@?AmEi~KkIu3({HBOwNm zyWYaa{CxtD7&6MrrAtAoRjRZRn@ekLL4f3t0XtUMW<+eV%N||j%n2AI26m1mUIxJw zH>kbO8}WrCNVFh`Ix0EqBB+fdLCV=1?g6_TbGgCvP-vM*(j+1x#TB)wXXM=tnyZ`G z@rfhrSsAO&%oXAUk~IRS{{V>xoyOzmff>VxW@ar-PUN=C+J`%i0pESV?Sg)uyGLmg zVWiiea5U|)uziNZxWg5cNEp^->a(h$x(opbECYL4v;6Q*le?-q6CmXakQJy_{m&+> z3nH(41vN4ymMDnYbd7+zwz0nb@9l5YFAOEfFCYF6?>mUh9O}x$U!9oAMm`^tTj*oZg;!d zyI?TKDayuV*QCb9fCPd`t!#E>Rn`duY8>F|^oqZzn2doMG@_WY-&Kifbt+AF)9qaC zwkEoOlSHW%Q3mu4O6y<|Lb>4mF9wwY!eo)dMU{%}1A_FxyDf2HMUjrp3#8J*kD$G* zYH7xn9Q^3DSp#8sT~jW=$qS6kak*g33#z2VDJA-@k?OWg%mO{?creUKXurh*Q zk7~O&w)=eW#Imeu6yhVGO41QX8G%kQ9^}K9nYrr zZv{maqAs55LlDi?>6UenmLkva*bTLfx913Eq%kZlDO%BN9aliIH4;y1z1MrJT?mPB z)+mw^S?`3zU6NkcLd3m+tO7zo*o5Wvs_%sr1jm`tP6L86{q%!@IO%+Wi5 zudrlqAsTfrprUU5v7VY{%o&+7%^OPyw$U3ETPKxkW-D8p0=D%wxrSGf15d6iGHF>O zO++f4R$8gFo$MRmZuGf?=|gf%&pbd$3p0q;Byt#TR`l#t?nx^^i`Wx=hLNA~eCqgS zR$1kZ7BiSZsx2k&k*cVj$9o&|kB@lAiO-sw@=@pJWen6|cK5TZRSVU{anCsV7vdbm zIgI3aJl<+CmFbdUT=~)(^?wrp2<$yazB2y+h+>U%tmDKd%DZS%>6+>>G{U(ej)n9) zjCA}oqV4+}VRKM)yfAB+x+j$CV9mPS4l9H6yW*ghx?vcSJVP!sF<6s=oAlDmSJ0?k@S)pvaxrSn`)4d@UjTUBV z99GNQ9)li!3fE?NwDe;k$!7DDEGZ1h2mymRR|T~m0Tsxuai_|73?3ZD4C`ic(inQH z9BU4#1s^%=r)?wRJ?gOm;zKJO6VES8D>CX>mR!9SQKd<;OA8l7Z-{0j%)=0t6oKDg zkjNlaCif}=>^oM%uy9}mbzKkb<;n@t*7l1#Xf?TNN5!~jLmG(K1) z{{V=y*|7o);-b*hkjBp_2V+DX3VRFn;}zZ&;GR3;a+#VJFi1gC*{+0iH#RGNYv?hm z@PEJ&_;HN*hHJyJFsq3=X(%nHskYAQ4fBp{{uCe0D}U5GWlm2!BF_s!m&-J+?5;Bm z)38yoz7$a)cJ$t`Y`5X;--xbZ{6{q`scy>*Sw@WkEDCJ9KHF?fRyLDoJP*P2Jb8gV zIFCeADQYZ9A!F7#Jl-p_pY;SnlcrWlvVfKmjpSyu05G`kY8?9UwlgWtPvk|rqB8y# z$arPEd03`$>e9UVKAMC8U3rfCk2R0q3rCmL`JA`?+bNS|ReeV|@vRfi(i-ix3}d`! 
z@#Bp5T7MC&&~;-Vjz(aINh?}{jr)=OhBMwF_}eg3F&XnJvuwlCqS?o#~z#(7OHk@QU8<9&Hx(Ms_^t4#=z95JpnQQzs?z9_s; z@w5QeKq>pwL`NwM>I7L!aVw9zKVKWaBG)BOHY-Efh#-+1Ei(MK_kBvtR z1{NkksdYGATkmA~fgWipS`kyw7SxKagf2%#1QLGO&ne`Sw5T}B3OsNA%7{ERHWGo- z3?*g+NJSBKPqepu>;o^c`VLhP(##=8>4FMz>(+| zkapg&UE;aC#5u)5m}WxKLjM5%7D6<&DXZt5{cw05e~goAi<83O^nQPibU@#!b0muE z7{4~9CduqJ+lsU8i~j%$StHbn8YrYXR+n}awfb(tz5ZCN@bcRHN*TheVU&e){ClZW zwGUnW1};1-^h2oqJ!M6ZPXhk{n4|T_r}Cxhjb1cuU5?2I!;?7s{n>yL=%*PX~4f<-xbrzy^5;iA#BjxReBAO}8hIf-nx)Dk) zgMw%iu=(%piks4ikeKC=yk}bQqL>=?vtMjW^9h%1#S)}M>hg|vy@z_gug?hhYE2Qg zk!dbr)dac2PTvedyxNAKF0A}RO804fGZ%QMKpjlO^0*cN4^nPMxgzf2Hl zn`#SIf4qa-JS?*`{GC1t1+NH62307gPrI ziw24;&R;gjlF1_Qq>a@@kSOy&pZUR{%gLCFu0zad1Ldwwk8@-nz8pZ2t7Ri7gfjx= zcV#1DQ?UZarvBJl%X@np!L1dtW#G*fX`_`{xr!!IOKUo})!NDT#eDFFia3#iK^#ZO z^8v4$7i3<*`1q$eWR?7iz?~zC9RA}3k0@h-i^w5eHlWB3 zt-#{Di|_Zt5y+^=@`g!hEkBz6zmwnOT;P>Nb`Dakv7#2{@Gg6PVAy@JE-P47;sME^Aakjg1>1 zu)pt&TzG$h!{J#scyviJkJO$}s?0^50PjPdIolbsNCT*muTD@wbUXkjuQth>|cY%7v~rtNQI+ZFYCYr#qNTtV$h^<#O=rm53%l z2ISE7q6e|Z!v6plej`s8KbFcF5-^R+62J;=s=v$MZhm-g@XrzF&B-kRmV6_(voR#G z_=a_)%)W(U!&y`0)l}t0-5tp9y?a+aX(VGRXwu36LP(_+R`2JDPa&0>RSJSbq{fWE zDxMU4w;#{G3j_%LD`!NJblC^a0px9d*!nC|iZ~MaqycQAM!%`lH`r3?Ci(V1%i9jb zu_SQ@)w-EkLkiT#O;@l0eXy=pCTZh;Dneg&akYVUT%JJrk?V% zM5CuHM4HuLE}yGW`g>zJlgjk-4IYsz0;-605q^Yv^S9p`s3T0HWn^?yByJ5e2*Hh; zYxE%Pe%R&tQ%;gM;ESexC-Aq91ZZ-ojnkNe5slQ9TRPKGAPe064|9*WeinigG*XiC zM>-giSsWKCrhu{l9f<<>`ka1bh`w1AavX_VA=ja-ZZz@7&)a-`qwqiCzt81h%I0%0 zK5kzuOlT{|K-d-qk4v%Xj|u!s#%tz@){MC=A8X6Nnr9#unAe$P)b2Ep4G)B!I!?r! z7;Y<1~TW6F5#Lbkx5n>8MB33(8e8?h$9EniM|!O49E{^fg0*<_MvKPUN_qN*8o)LDrN z7f}>A+@9DeV;)d-8P+6KQp|}>r(nbl#fA0tymLO0%HjzO(nYC3e9WvF4*Kl3p)0)r z4w}M}<`LkICFv|@P*4r7n<9lB_OAZ8WHy-_8gnDGrZB~|8V*4LfUu^okfYaO_cScB z%F08$hC^hN$-Z{ee2_1S#a>;-!oae~WExz@r7c>j&z8tgKqTyU+iY3n;vpmGqn2OO zF7X#X;e$>nQTIC$^TQsB8ICb5inJw^of=6N%th*8N0*;}U@>7FW-#Q&v5ia`tcFqm z*{pjF>k4J^h`DrJn#N;fW?#}%)Mi^Y!H6Q3>bv{l=s;YvJPuba3!x1E01=LXp=V{> zR>y&f4uNbQUKq;&k;VhWY$S(9khi(o&GzSSOf&g;NivAbi$^OXFA7vvqD5`=1Pj2Y z3~k7K+;Y1lf(HhXjaH&)>~5ES6h`C9)pdScSW1bl z6EMv2Occh_Pv#wo1Pax~0bKf;z|L4S#!RaJ0E=R2ijdB%YvU`wm~MFh>@Y4iME!_n zA`%9HRrV3|;Ff`AZP+N_9ftc|w-#=14eOFM0YE4iAp=Xkpb6&t`&JwXV2>{>vKZo7 zbrIEObs!5W2)3hqcHCe~34|oA8C3m$jD?%*^TcVUjrSp10CaQ_6 z#zQfhe6+KsQaKiRLzXBAZ6F;0fC&R&coo|Uac$7V>t;{K6vYhB6p=f$BlN^AFHuU9 zs8HLzlfki9jE9PN8CjD*;tud-;*K^{5b4n50IV&8Q5H=azu#Y(Ft%cR+vU=E<&3W7 zkrp;Q0K`}tiT5M#iSfS^SY9l)OD!bx9KTLC8D!L;UiJ->RebV$$qDBxYuOv<#0VsA84m+9ULZAOL>5z2PM&+;rY8A`GDA$0h*wFtQAvAN-uT{^jk(6v2rWe!+@|)}Ch;ur zjFe_3NSrM$#0OKTZMNY3uqN>rXd-!~4;)eyaEL>$<%acJi~aFAGZJ)UD;{UcuNx3{ zQ`7|i0Qu_I;&D?ETrz$V))vq4%m8%U*!?azNB+4;uAwJoJL(zMbfpg z5wW}Zf!~Y3B+eN`?KWR7Nsr;_k$!4D`(5_L;dsnZNazb&YYGK0(QU^AK|ArzCktxJ z+_30k$c9M)0@+TY$NvDTpIg=9%cB;K-gj(^Kwb_z~Al@iAz!0dFn zjgM2ckO{iIJF(av_+no|lqpVo2M#Wg4jP zcEa*w_>ggCj9ESu(?`)^f^#CY9RsXcLnhWY28kVs-uVZ<2D3B~g2G7k0?wr01k>^8 zNYptNJ7Sl>^K9@8q|-(oC9usNtBrWv`jJ)j#luM?u}!I2L2WuunMf8ZN!%Y%=y&5B ziDH&1H6yIDMb<%z;Z;;8+KKc3Q=CxUhRwBr2IgvM|#67s}fP z(kqS69$MdS0oxW{ALBXC2$fsI=6L=VPzJE1*$hVksJF_#z>hH+W+Yivl&-BSRyLtS z>P{@&ASn7EAWr|5R zf4Pe^oZ{Mv@iQ;v_IU6vTKtij?jM{XKYP@`# z?eoP}WHw185|(J#xhlIh&9w@?)wjRU9v1%ql$<__Kc>YULnftUBVu>?Z_gEvSDP2a zhun4?oE>^TgV%%dEiA-OC@if0{qE}E?Rn=G)H`w0azZe3f^Ql-|MIqWWX*4+& z3k@oud!BeaZ;VeXmc#fwM3=#H@`#1U8x9Q8Qn4bA_C3F`i^x9=Hnx&xc~K-qT``FD ztx-JePWAWihBEoc@>zSzKU}d2WJMpR<^a(mha=RS4X6Chb)yZKKu88S-K5(5%vk9H zy$$hCq#52 zD8&joKx?YIUrtHFJ|`@%nL#A17B&o2=~P=PFKxFUKKpmUhE{ftL}^?x6rCSFojQm& zb~iqQ`Qea;M6JuPUyi<|^ga_C%&ulwzyduH1ya8bqAY=1*YxKN$>3fa6NJuAqf(P{ 
zuw$_&dMCH9t~2qZc_xwDs>#(_N7N|nq_)LDBoVRy04^JZ43=HX^kHK1Kj^7<3`hi$ zM&q^bz7v-gTcsu{qPs4Sg7{m?O!>HTBpL&x;dm4|0`UG}GFc3ikCBTlB$0p;Au=Mh zFxb#)p?^B8V)Nc1E^jcSESid+gp*_eMUSt`2xjx~x|9M4SUIv6Nekba2fqIRVT3Om zB&G8`6XaBEGrx&EA1SCu3mL(^F$6_!KqG7JF`4mi#Hx}E)~K?8{{VuF=0=c{!rKpX z^cbN0+8}A4IiftfeyqkIHGoI{)=t-5_Q%cqZ}AiOws$i&sKlK~L@MNw5EfNba8CR4 zzB^yT{{V$XbN#zwJdD2zIxasFek~p*{U{?vg`{9z!E(v42cF($^y3)#rbd~kTWHaQ zp%(RC%C>0Q@2BcE+X0nKLsn`+1lGe#>d`=2n)cE*tM;rPpkToASdB?TIiWnfH1aW6MWRr3e&Bo9{3|6}OhS&H30 zuHyLj7@l$=*UEJPIatn{UFdSho4X@yD$PPLp@QiO7^ye8<8MkB(-mk#bi?HJGpg+w z3>5)iDDH0d@B3ogDH_K*$YTu>t+6JKZ5q0fzZ!|#j(_J0$oRsBM{ddG zAxxalvd-7jXIUt-SmWDrHr|6AX}l*dEHtwXFC)l~`y%xKU14_kMSw`|MP4X8BP{DC zP`-wjGb3nu4#}%en{qtc9{#&yQ{n8;$q82kce9B@o$2_3E@v$y$dZvJXUeXM z%STuWPaqOTIXNQoet*%jGOeTN{{V-->Lz0-(d8vsGd7F>c0Ev*XrZvKzLi(}zKh`h z0FE*qCp1kKV??EvwGpm0qkd9u`2_vDW8m_5j5By*K5H$Pxj#oMmzH#pt9f((ytwJ()B8HHD+gbdx;eU_3Pr|cx zn|UROt;sB|#VfX%7yQP#=JCDoU;3?+G06G(rG`h7P%9aA6gB{ZV}H21+Z@cE7Fc>N zpYDB&H=2$pvkk9MZE@SVy(LXEHr&$fTZ-TeR3>^%!7@r;%kY@UU9|yZa&{uVUiie% z_}3}p8AxI9yw{#$$!fZk1r$kNQ?MIvi?0UF!yHgFa=wDh`k4^1Dha!*2J6~|VM`N& zwt;NgLhT%EBiW7YfC9<`S86nNB-H^!7EPCjHwjF!h?z!+iDT3w%A}UmE6cxoC)^BE zcy@14ikM8Yxh%~lks|3VKqL;seTCQ~oM|%Ixa3#`ohnNk1*k?rK~=XK{Dv0os$}b- zsAgyBh;+)4sE9L=s&0cmsyhK+J@CFmGLYmlGDH$}XoN|ZSrJPTpwVJLC&~?)HG5tr zSSf=r5N0Bb^uYm0jhB82_NzXXk4VyG*lB|#vZ*o0)jRax-B5xTjgJZkd<6@V(QuPxpAcPpRNL58Lg2*X{kb)}K z$nABe#nT;QA?FABWiG7MLM*V zVt}iVEQ&i-k?)GVl8!cBE~JXV=@j+aHY%t>_d%rkRc~IKVMSMMQADu7RT?5*U`35c zs(TJp*CcnxrkhkL3QIGPKSmhQ_o6FNTN~LN-Ch(kPx*_<^uq<#uWbD$)MKC+DAVu* zd}+WrBo*c2}+Ax--zZK+F5fMD3t)Cok6d9=T6%mH~M2c;(5q2 z`G=FOBf_-0zcUytPWDJq{$X3z#|wCdb*9eDkPO6gNaU#Aka0}aUtwbz?-kEx9BIyG zWt5T`qmD*YQUw4|bnVUkyN0B#8kR1LnoUNbNF znU^kN$-~o(IcIhnrEy2%HOH_#@rr*C%tsR_oRci9L5|2~z6QfQVu;)X=JT5f<0J;&Ll@!FNq?A$|F*kR0 zTy{SAOF!Zjm1y#@v=KNItHB-p#^j%_DswZ)*?5WuM|Pf2+b|>)1&0;ze#aUkigWGK z5iE&)%RKMn33AA*l3&Y7t!wh(MW|Cjo1Sa!ipT8J1sPA{?YXA;@6ZXY^ zYB2$xH4@6hj^G`R=j7i)-#jqU%TZ3+WEps)I$db#>afy9*`~jzHH|Rw@j+<`mTAjY zLO&9kW-?J_G6>X>vbhKARL!>Ak8E@EIk;iXC*{#d8UR6ILV>K4-;Z8F!BZ;v5hRcy z0d+=tqR1nixIS0zI42e`cvt28S1**b9!`POHK$P8kfMe8knPP8#v1rVbOLjJ$_cYF zB$BJbt)xe!5m-X2+1SE`0)><*^R%C;88G& zEdVTQOVx;BKvB(7y*^AsT$3d5*Jq8D0eGibp^kj3f_(T zr#X=p48~ZP-que$U9yX>Y9M{~?}l=Wk>+74L#xjc1E;%aJf6qRd!EO>3ZdtbWQ-|b zLYGCew&Q;L0(bP^Yzl5Hb#jl~Ys0fQBytIgaOzd4DPnf!*b#oT39fOji1pX>T_j%4nkQmFuxpy$$EGp98nCWvGywtqJvvArZ?f-q!wRkHh23LW;DStsav7)der_d? 
zK{usCd0CrIM)e(l@ATW((D(B2 z`Kfe%nE;dJvTMK0@!t)2ly0QS64z8pkpUSS8x}O!9CzNk?~Y`$r!xriIO(f=Jl%oY z@3uISbd%`qs9)ScvXi__dTPLmAzJJb4?ec1_V&QPCnfnD>1UA~I@JK<%k$EE{{Wj8 zuaJ{gVm~>Zc4a#fgn?Ci8neB6dSH2!Lm87YBN`NS8o@DJi@m|U&e&vgJ)~!z1gZ2x zA)e8giRMOzRXgpnks)F|ijJT@!0(3)p|YuLnSwI(NdcJ()B4{7pKfrDn9@ayGSV?C zqU5Us%!PQPL=bOd>xP#olazIR9XHU*r%3?N3Q6E-BopkvSt!O;=oq;!wt4cWF4Iln zf=JyNdQ60kBt2>;pW$JG<$n?9vaZRS&Sw%aE~uppK_uA$faKQuUf}%k2}WT}PJ;DD z2?OEt`G*{kz;W1O8Mt9}V;qr23rGtvr3OWPE2x?jclO@|p&7*~D;EA)IWOpJ7c-xR zRUe(rD!EX>X4DNGi0#cCxW9Z8Gn|(v;9e;&UD{}t57P)qL9C0r-t}-jFBts4iBI9M zx;D8orPv~aM_-V+Q&hTHlvf3I8;ogU@ZsaRX(aIeo?|OKAt8BXDvr_0M1fd?WD=u} z)r@lS9C9mK{l7M8S+T5buKxf(x8put`6(V|mon=mOC^wLZ0y^Ux!U={Sy?3f)F~V2 zcIlvviKhPmFaT@@%1!UU#e|}>4;f{XNpcWwZqDNSf411H@PVr}J53W8MrhFxT7Vd8 zC+d0j$ETIkX9FJ_OQS%ZBr-h=w7~gbTU+g>`&itQi_t8Ty0nr;v53ajYmiCw-~a~r z&gCPCzberccatu%u}}`6&9U~_ckh1KzVOEoCr(CQjR4oppKkn(w%BdPJkOI$X>9cV zK0RCLK1EwGDd_;E<7QDH^J?F4J&6QfG%^t@10#n@^7Uh}UZe^>q;Go+RmYHcGf9}t z2^xI`Rl^@LGJwZW*@o@C@#%)LjPt`6RT_8lM5wh|$s`Y`721!>9E_`zO&cRa8DuhM zgG%6#G%R!w0CcDTZgv}i{IPnmK1U?&3%bfR(Y+L>Z4`fpVZS8aGS4)y2_G!-Dlz>% zyu@v}p|RNSzBC>k;yF(dhycVg$EDyX3$p>u*SSA4^2S{8on2^5lTONsb1`L$D!VCJ zH4vM#5;m*e#^Ukeo^t{_KB&WJ1xW;Q4lY6h-> zIH}8JCW|s#G2zJZqNr%(RjUQQys#kqV?;g-4FtP(15+z&nZ~*}A#e-ZE zi+sLor!2g@t)7{(@c@YiRovz^Ay*1?8gGYA{4sCV}BWVxav!iNxGoQQdBivQ}J#$<46Q=x!)NKv-O@v zU(}vLlASSp5s~I$$E6+xRUPpE0FOfQXWOg^k)u6lW(CZ;jTYSbNdSGgz&tJyAsLLs zam}d|wT&Q>DQ~g1-u*!OdtV8$Ef2kjWx~uB?TWxFmoqZMm-7?Y0RNVd9(#3y~ru zY{Wani^%y_vqWE%S9~*VZ?SS|N|8k_UPP1`6iO8tkSINt^&0FpzVr`#P6*I+#Eb-R zu-3q{R;{mX$rL>StT#4aubquzl_G)qOF-z{fH>sx)NK4F>hMg4MVuGZvB<8iPAC@D zq^LXZ<_DehpG-RG2<&GmCXBpc_1v7h098pg0ik?vWOvwB0L^J3c-8f{#0`UdHQ3yM zcfnzjXkv;+V_E_RB!Hf7q4vimD$*GXYIRL0LvF+_o4;NUxH!khcE?|!c_<<> zGc)xW11lXgy#D|R+>_gJ$;O}ZOD1+4nRaE44Rbe7u%bx5IIL87WM(Z#r>&J8p495t zd?cRxUNpH??Qp!%Rudg zvnEOWDPwnIneh^+?;{;r3ApJ`AnnbNH^5)T1W;!K$*p3QGSZlVR+BJnR`fK1tK@3~ z#!}bk@BG-r=&~L#`3YokAaq6xJh35kW{&K#kOGr6r& zOC))@^iLW@w8PJ(nVW{PI3VzN7!0^+(qEsS^fnnUY}G@UFerSS{cjI~BQTD%#^eND zCWTpRVo%c*IlhmPB8@uXff5D?%)|~93IGOP)^L`0Fo+X1DS`-GvfS|k>}zi{dyseB zfxYvIWT9!T%MrY}4%@h^yYc~rMmh_0==+AVboQ%%@d+>dNlWu`f|bCc<`sWP>! 
zffxW%)njcmDF*ko9x(hlm|&hcCCeCc*>Dbt)Pl|!xkAJQ8mJ4^RHvK5=LpGDh&-A$ zei8owRWlh_;#|;zM~6^C(wLmP0&MC80y~vGhTCIh;UD#A03*yJbAUI?NfgOnho&I%;XullC zn9edn%hg38CV};_1QlvKfS}PJP_cE7%`cUgEtzPCh-YG2kTZ1^)R@Yv1y&k})Zh*I zB#s6R5zL?lJk%;^?rW^bYAr`#KDD{W<&1}oc-~o{jy4Kq zm!gr!Jdu*ur)4U@62$M?>zr7@_^ZV-v1S+&x=-pb&cxv?8fzeViQ0kQzPP2~&gn2@ zk@`)1VgY6ccT%Q=asj8ze6&~Fx2PLd3;zIjGTJj3l^H_G)?uZT6(yBOH%d>8Sc?R; zUH2E$rWgRl?O`U*dyVwtiVxz=7S1*@XCe1PGK_e@~ zvld1rbkG1<^XPBlB96qIe1w10N#v1hFEp|#)P=H64x(tkHP5E_tnnYkv&kIM$Oq>o zAwr~KirG63z@GmAYz%x!x78Hi9@)L|@5JermJW_aEUZT_sEHDGRu%&D5}hQS#ov^0 zFBne{@x1PGIpfk*Hc2H|0<-P73#?L${+sS`mY+GA4IX2~9Um~t!C{S5Dhl$89ryjP zjK7`F7jqQd2B3wT6kt$$ByUv8C60O0Xmmp4|V45x8DeO*N;QtQ#4)|3rRaBlOdJlHS@49*0#n{ zeAJoAGcm#&H#h@IL93*PR{DuPm$o|id}&EtQV}VnYBSzFk;}#tBl$e8S#@fWqY8du zt5Y0cv)(`Xi3<31AnLFmK?45(^lg*^Jz6nDM;IWX7h@tkl)88&F-3{Nq2K z@m~+g%hK@Eu%aZEufUqY%c518f|PpDj$%@h&f+LYDhhzQ1rR{093N`M zW@|J0tCxw)ZbCB!gb+;vKm)Z4Aom<@aoUC%OZ6D#ksybyl`7lWJ66?O?tQT+#+QSnH3=kB z8ZOp$q3kJ*p2Q!XHhd(E?rpHXAv7QO3zUqYc%+y&nwWJP*N`;set0yw8Q&&voW7`f zjGzT`F0uyV#WIq8>ue9hBI*)U>HNHNnA!OF50s7j1{Hjsd*bGJx)5|LTQsW2hbB!c ztG(~6SRi}@eXzO9i&2P4UfD$sMh2WTF_^?dQXLvW20+W&$e?-j#*4zdDf0j$w@xV} zkt8|`6^uyoi{F2Qf%7;z_-q*;3ywNTie;ve+GWiYaA;TJ5W@j-PXZ%B4DH5dl$<{l6AVpCiW9dqveun_a zJG8k;KQj{fgwd#C%#y}+1M_WL_ch;aIhN_>GkKnzYWZ&tj!h0Wv9_anEH3As1_>NF zpZkP7%2&A;&&gZmjdAEp zJ(xgVkj9Dnp;-Z=M7s0291pGR-qZi2p|?)r(wV*wHoiPuKNS&f?%PH>c-Pf>m4&%rI09^pu1k)+waC8 z(jm;n3WkbDi2xU6r1*=Hc--tecJ;(O#w@DN$vbJ(>@1K-6?8ZC2iV!f!RbCqS7p+& z!W&{JrCH!@Wc|kYwb4OVb(K$6U0rD8(`o!B%4~PLv+ZZKW3W7MPCqhC;ZjCojBF53 zbI+5x>_*10C<`uBr0N_zZDfK=1kfmYd_MaeGnZ)OB_(#8$*dD#D>aoCZubNm{{UPA z-HuL)7~L{6A(2;8l{Pm5i6qr|jo-NK#u?Un@-rTTkO0z5XK+E^=R}W?;;_6f)pCYn z6NXwMl#&QNhzvf1whCjKUZpqd^i^afgDE8L#Crw@*x+8nREA-XUra#EG>eTC0s&IC zH#9d(0E_bvL5p0BlA_5JGxXxkDwcTyDm*}zwg7CsL9siF+=F8j@+zy$tq~IL-7qh9 zB(|-+(2MzD$a4|Lm{XEg>2&M?W|Y62n^A3K=@tAwyW%aznQf&jM*jdNux3^SW>#{c z9Ei!@sh)w@U$3q`0>vI>g2}1JmNJc6!BuUWji~Z>Aos-%PdLVQpC_4`5ZRd*b3`48 z3{8VttG^fD4zGw5f6Yi1a2X3KY+CnA0MRzB>0IrPIg`hfXC;4@daPbdXO1$j$~01W znc|OBvdDxM52=YDC>!u=6 zX*zBSv5bW%PK6`Wb6^vDVLbFOM+};X>q@1DhbwG11AT>C{{X4OJ{2I9q>o5h)-s}4 z#cn|)nigut_hcG3Vwv$oE_A%Vm_Y5vaw9*$}Ed1VG1*iN=lKop~sa;aC?^o;F z3np@J56r`w(#~@18bq=HTlh)u&FzkUe;z(U>wb=>i=J3L>)G}UrRhmiHELKIQpieI zbGidvuvu3$Ol-2VKk*1>y-i)o6?JFZ8LXd;GZ`QB!W2@h30f35uU&@Tx9f*n#HZCK z>br%IM<1BbQO|w$t&grew6IMx(e3-P+b;~Hp0-j*kyrHOATu!ml!^zEPS^hcE;ojS zDbk4Ca(NK#W~`C9^xwWK^1d;e5-&nw)tZ8_jbEK7ZorE4_4UPo@ksQew27&dfNOI= zd6!n;YV;N6#%ZjV{{UeqeMe!;O9xIdm<-Oqsdgr`{?*PSlbPOSGME}N0MsH_#p=Fy zKMjvf@OksX46IHv4Vr^k=TQt+tX%_j+@9D3a(HxtFxgMbyo3z`S8{x(%rCcY_`vP$ zuk`-_vYa2#G;l`7bwQlWmPSX=;r7(5IAL4X{fFOW|1t zl&pDj30ovQZD;=gwfc6%;`}{6b&BOb?b<*GUZ_C=%~%_g{oVxGi6jN2M3+dvC|!q_ zv0EG+&OMbdbdISKqhdU&qR$|U=htE16rMz`_x(glEMiakZ{uy33m#cWwrc94_M=;T zzosysjDHCDwpXPFVmIm-Jj)!a$4GAE9lM+M2OGtMDk67|R4yW5LOnZx4bUF`-LZ=N zUz%xi?uR_leqSyb7_sE)Agx)9!%vn}g(*}AwMMX-O&ZvaP4j#6 zi8y7BNaI}+5FNUwraYp+rtD2O&$V)Wu|9+|Ihj8$9i*g<7eBbUKxoi$>m1;U#=MX1K7S-RFebJ^QM!oP)j)*AD_a1Vef{RHfN|-Z80W*DX&=a zfWy-2`%x8mS{USGBFO9`c-2d)`NM%ov^Tm_zt8-`5iKAyYYvMRn80Ez17Ho0U^n@1 zSVYzIHWZn4k$_I1OBi^w8kU1e032N#KSSw`9(VKG{QO3={#0-< zL6pnRR3t32%({aEbj|H_0IVtzosvA$X^ry%<)Uq47preghrTPHG5Yez@;s>NrkDT? z^54rA@hj&Qz_R(777$5L8CPO|{V)Fjbm3LOSXLXwCt262mFh(!$cvwT3X74U{RLZX)pXnYO-tW02s6ULz)Sf5ai=X>6!?bD78Su0e9T)PAmL0 z1(PwH3Hn|i1;wj-W3V895ZIdIYQwO>LL8bZa<@p-WIP1)&x7Sobey~&nKEo5o?jnSEl7MHoO7Uvs%*>Gj5vP!}x@XvQOCbrGeM3cA-sW6%@1 zBYu5xlx*T==4|Fx6Vi_X%Uv-K5zps5QR7bs%DZM}j6fwxA4p=MK(Z{c?PY)@cIICznn@z? 
z{G#SYXEG_(^Il;Ywgu>E9#RPdVX(e&9E36FGxLe1Z~IP40dJ#MS(ZRZ)3IXipwR3F z`0M7(9t{2_?9t}@UUG{ws69A|Du-(uu>^s9ZQAhh{{S!T{+j;)+kePvn_DM8h{j}{ z7c~xWbs8XJfsiUK68huQV|uaWBzLSyGiFG@n<*rDXNhHw7tWxx1c=B=Tcgqsdj839-^+hYOdHX zhZ%AiUI>tt1wyfD8i*x{LP-9ISj{t4l@nT_@Y?}CfbZTab1d;}2jy(}53`Xq1Vl_kfnCuQJGN^NTklB@7 z(n{KL@w$Wx!_TOdqo+-6-@R7w$v#^=v&Ed#mPISlXb6dkci9VXSH7m~FPvY)ac!5* zEf!gf)|JgiD482H$jDnN$EhVhnYOy7vCU~8MrQF8&1UD6NQQ*51b7%bT-2xtS_4HU z$AEhrP#+PJJh058ymRJdF=cY&GUtt*oRf6ERTas;!w%&9Qe=Q;vr?9qODtO0o*T^M zZfI@?%MAEBwuMhJT1C$JIN^pV1Lpq#EMf%|=!jS@R97U~X6D=FRPpB5BGJi|VVusQ z&~|Mqq}ZhzM;ic7HGpNz{wJEuu2I-!kcOT{Z84&nK4V6S++ulm=e}%XKEp zjkB{m1F5dcs5C3v+k98Utr^_xv&t3le2~q4BXoHSYFxBzgoEhpO_S^f-SLp|&&E^d zj+wy8#-M19D!m|Z0NZjshWJ~?yjACBT%3YwWnu)0436~X0sH{ z97qbH&?fupJ8{PNp>Gt-<>HxgMWSM-T+YOW8gweHnkLEIdY?gz&SNyF+W z5XO>7$2_u-DAH6V)jE+XuCdRU)f&H22*a6mId%bp zC2U$Y8i_6CBoSj}HQ#@xB=91+kzr_;PPp|A+e7iV+~0kL19)pE6paKZrGvAH0Rv5) zROENHto6tblKFH?n@X%RGrO(T<4ZqLV3=iLTy7XDMNJ zWZ?ZZZRo@gTwLUr^Cv1DUQrBz7EMBpx`Vc_L{JVFnVwlzNLkE9G)6T5RYF#+#{U2x z{{Xd%FAX3?lg#OjW-TO+G^K*T5kS`5e2zK!CoV2Vl$E4e!;^zG;!M-285BoSK>&dp zF(hx_U_E}ArtzHonZ$~kk2*Dmw=K5cs+$l(|61B*Zcw16xpc6j1~xt z(za>ZD8Ut^pmzsPCV&Lr7#43Z@Xpa&}YZsa3= zRLO#=;rVKQOKo$GO(#=zaFAU-*ZBsBnCTU{} zJIh@uTt(Or2-xWtx#tG+LNuEP;mMil$PQYNMq^USb|Dtkg_;L(cpE4x>;Vpr?i>Sl z8(=ox?0Rv6%#(>0Il~FMB(!c#1FVt>+*iH*F$pG)NhWtH&W=#V4yMvsM&7g-fC=P# zVd;u$hO+WANLa3b29ZDpZ6uO)1F)(b54zLaG*!hM zg#mYsBt4U#erLJJ@BVaB6BG- ztWiRe%NET>)HGp8-iqob_N}h#4dta+8Z88BT*B%=S-M9{1qAGTt;f%M!7*u`5XOc+ zlDH@V0gZ;I8bPjYZ;mJ(h6=eE76Q_B6snm9z=Lclh3}{?{E$fPM-fK6xQomkO{gSk z+es(b5TwxCaf!te{$b3dvh>;_NG7XM7u*j1v}(4+dJa98d2;I>iDGBdB*6Jg1g70X z5%CQ~@6N`K2+Fr2$Yd(krO*IJ6gM1pzSZ}r>zE*&6_sP82+T?nOB&c3I(*lBYV^jX z^08);Ir8R4SyV6<#=$3H?Q4IyIOq8-L8(7yQ^ZbDaQ^`62P+88A)Ul>OjNI#f6YW! z2WAKF-yKJu5bF%mEU5}2`Hc|OZDd&3*&OmN>e-7cBuvPKRZ9Xiig0!yj`g;{;eXXt zl1TuOC4FmL8U=KVyWA1*jo<}p6 zh6PP2D=d^gBd}dSnyS730DoLSq!WZ!S9jFMNcd0Yw`xb{v2W;T+US$viX*28J!9b{{SVFG;DU*9k%3iiwy2##HNYAGcQUu z1|?8fx%02;GOr|60p5-=OUIn0@=k}pA~BE86OWV~F08R0p(!JTk%`fyjZ95fB8RBn zs)2kxJS60z8L1y3n#^Zs>#*}Uk3?*{X*4wgD3knK0zt(w%}g6GujKtCBAk^07%MP6 zi5z-^SZ^m0%x1(wHIjs#Fyl~gTK2B=K|kq^#1yfuoNVo7deNFOO_+g9fD+QlwNQFD zyL^uyED1Cbd0QprJyPdl%0`toH~{Qx`QgzlOB}_@Q)*X^?9n&2oq+f6wgO>g8l(fN zJg3YE1quOYz3by{K3LT5jFCor#2LENT#izgc^DE@t=aY1U)u?~uBPh$0Q|7DSQ1SN z{qb$$d2q;eTC&F^OTk1?K>%&H#0CEV>@iO5{{WPcW#jzZgoe^D&M0|JayBOfx6s?- zwrsL566Nx-9ernl8)hR)y$b{$ewee2tH@)4k>X9(+%2(F-;KcikJmGk3zW{}BbipA zm=~%CRS@Z>!SZ0D{{VB*c$pgH|eJ#DPMGms-l8S%@BVOS5 z>;c%HmM~8eZ{}MxMtad=`PlO0qKOhgBeN>Rpb&SiXdU}tGWgc8B&HHDw(8Q^9+Uv! 
z2pPfDwAH89fb}>u!bw16Sils&ra)Q)(0zV&h2YB_i)_Zc%%_vv_CIVhNRh^<4zJ>H zMF4wYF06c8L}HI*G=$JPS+_i!AJ-h9;X>$TYo;4l-xH0s0&`H+FvnN(Us+FjUEx5kK{{UPmDBk__oN*8<0>eOH z0)i{c>*?E^c*eTUk!HK5$3S{a2TzdIMgV!6fNX^WW=^0zx*Y4ox}pY`g-u=!O zJ<=VIw#xaGZrdv<6fc9n?_&drwIxUa0l2-m-vyY{N&wOg{@hu>0|eIc0X0}ESb$03 z?Y_j{(;61hmT!!$9w12|DHxd0GL0Db*&&X$$(usjVxzrS2e%B>#coyC*ufA5Gi zq;^wxr%?91R>FK&N0~y7RkaEf*nldgj+Pa7zz)z-M#T(zQe{XtWwA*r~`6u z5it<0tSJ;gKAxY*{qSX^4CtCPfOaQ~t@p&0!488+j>rVA{Bl2@2L6F@Ud6x2T8Pj< z2ioz(CNey#!IX+U{r;SfmKq`{A4rQ~AX*G;+SU4UFYAEIQbe61{XKO}xD7+T2{0UJ z9A=aawr+j5!IZe85YoVIK`U3aUHV_&5QS~&S5>JMM)2AsdQQJ96&k4zt^!RVkc*N6 zs?rcjt=PL^`Af+5={?p@7Equ3~~lwN?wppC%>rU3%_Id zYJ_K572Bv3IIVyJffhLV;6!N^OUK^^nE;Sgl0fg?usTkV1)El?RnxV1H|xjWoHZFF zsgNOQYm>3|$4990k?DBp1-Aews@sfsCwkZ|lYDQ21b`a06@HlRnzOJ9KqJ4~zZjCP z^a0gEih_5%cmT^GD#o|4cpm=%QZWi5#dU0LcdH!zG2kglwG~?vxx|#})F^qqMb&S& z_v?sNk3sK>ER~8iK=X+J!Wk`Ayu0vs{$p%JZWxPIHDGVI{Ng}$R4RhfmY}tMzz<$= z3nU4VwJ{9LbsIdNU-!hMLb0~!RkkNk$CU!GR?;ldyBlD#1u+r`(q8Bu)rGgQ>OUwK zE7MhIkxMZvC7s6LgY~WZRt2bAYNN0lA53;YRFb#uMgIVO@!1hooerg7NCLga#2Ufq zR{J<~uf)2x;5A|&6R@%DN&a~0K3zcWb~Jg%Lk$$ie!!3GhzsoVEjSEQa1O0oakuyCyXuS{+M?ZxenX)LcJumlm$EKjB=x(J{WV^C~>PWQcU^uXGx^e89% zyknt8jY>E47$kv2RHsnel%fx(Jb$(diw^>@i(0CpDuB!hC!hA?+a6sg%<9oiO;vk> zMOgO!wYCN)3kqp{GJGX;4Tk%JjzbBNw5>d>wifGqut%w@+usTL*`tz4D1;!Sti&T< z7Iz2b&&+$`P&|S~mM}|3b*_f^NZ#!6>w&0=(b(#CAQdFg8*UHFZNbN3bXJt7pKX}0 zJM)TYD81}$2 zau-W#L~Xe9kVo*6>9dJU;`#`-~!I|ADgTUX5P zc^;T*49{9ue931d3)0Gm;h;D*!utR)s0uanwQNBb&iC5=P9%X?s01RtsM~YgbK2-% zBi|QV3idyjm4s7}v65p8Av}SpPOJ7iSiqBTs=I_FC{-*g?~X+Lq{4!bTm?Qs$M?P( zjHwnKM1Tfk8){>IgWGMp4gIlkMahQzMUY5yP_l-OIYE)gA&3@7AN_f;cefRaanc%9 z+sZ@MlSaTa399G$<9qn$!6b{sd`CBzmnxB*-%P9xY{aydXx`wVBYLJ&-xzuF4kI(F zlM1Q^Ad4>7TN| z^JPPoJ1NxaHaB9y+nW3Fj;tbl*Zw>WmCBti?Sgq++06z=f*ggX@LbLX9HEjDf1ha&EvD zF9XGlb3`2JRti^&2UQ;X`*ZtZEgdR|-%Sf)tAl5srD26=ZWJvl8~sNSSq*ovvBmvy z0?1_4mOxuA|nN$eQ)0b&|-oI3iQAsP+dWyJ8m|*IFiIP zs)C{UAh6)q`u_ktIhn{wlz5R^TloNh9$~nN`=VJn2?x}&^#jD z%Bdqtu^?;KEcR73=AfQQeX}y z@cLqMkidB~5-yB`%f8oy{{S-OBo7M6j9Nn4X~Cnut<86l?Mo zt}9BDrhq3=*bj5g1Fxt6(AL|ki#AX9`(nNu`UUY2RW9;@D-{P&y+D!LuYSh2#9g&3 zO7x*XR$ivSf2IET@nzK^%z#BrcIzQcfQ(W>>so(+@m{{Rxrq<$yjGGrljchtFU zXHXthIMIF} z@NBL}G9<|79$yZW6PJ;|Pg`muao_hmV#GnUqU#!gwTz(#%>ifk_P_$yPA7^8 z-&TSPA27X!*WdbL`zuI?<<;BH;RqU;m}*d?WuO5f$kX-Sh9HrKScHX6YDle8duA(ljG8;8uY-;xO?Yx9@@C zGnyq3@|B1qIj}byHp72oSOaXN8wxK{@y_6V@9l?%Xf$W0BvvPT)GSp32=v>Z-yCTz zms%{kqXD{S;yiRgt*nf!P}P8=>Nob^14IQxaT8Cc z%V7DxsKe7vvmi)=%NcGi^qT$=>$mJM+GG|otrL<+1pbc2b|97qV`8ed!8gaTPNeNS zDVP5M8VuwQmdef5WT{wOv6UqG2s>EVf(Sn_4i_Rj)VE*@R&DGP>D!E1OW~6!kwk38 zNLCz(+-QcSKiQ#dWvELPawz;o^U;zsG1 z0)@qnI}OGx22hk>KvpfRQ&}Wm^1OTUY+B@u-Sc}fKK}sgg0wBftzb(*TDwO6(x zq^U#)Oag^P7YX$pega;W?{VqG_c?E&5=R5mXjx9fE>S)Z$Sd z#nq6A<8@`Nq-Erfk@T-!{+N<0%)h3#&qGhkZI8%|bifX?rGkq78#R5uv4XT`At5^VwswC})n<>%F z$(E3$(WI;lnP4QnKjdx*ucp`xaG|zhM6wksfKbz0oB8Yq&(nB{e`DEa1(lqDH?UWm zBe#4>vs$9kgoL`YM{iGU?BmeDzDuWK7?H^0{{YhlIdK~KV*~&k>_{B$eXzF_*!W4P z(7v_MhQ2BW&!>KUu>j^bk(dJ7M$13}x2E53t{H{}om%Bcs8|ey-4j3o*qRsH05BNc zl!2;lo3KRoObW7Tj3Adki(uMrgo)DL!T?@RL_t z@9pdDiv-Qdl~WEyfPr3>&Q%rM4hiSyR(I}jRM$cz?oG0;Ad^QTGshf?%F4{dUi3ih zIlcb?%ZPOoNu{t07dnV6H46IuJL2wK?}*I5t(bXLRS>sCSXDNFf-0|%=ZfZsGc3ke z>P+(j?;!@-&g@Um7Naj^I`kyROqQ0X%)`%gIUC??e)?|fz8hy?!EIC#rI+DQPTjce z&IDRDMjmD*2A#de$LWKh0qbpnA-tsgJMqV^I_S%4ekHHs_8)t}BUN@9mCDf85pAnr zaApytY$nPwbtpvaYe-Tu@+G4E|_LW<6%lbV7JI8Y(_Y>G%Fxv;Mh%GQf~RcxL(FbB`PB{x_Y61OcO+@ zJAiBtmm7O~U~clX%w3qcvI*Rp-p%~6DMp8vLOG7e4I*hZ+Q+XRxb$(n?f{LP=xZ8o zsZaEyf>EJvR?lV{IV5IBI%I7{nQE_{ho<~sag(fR6?{?eTVS&TEVt5icc#*N-yOX% 
z0=5U~>80fC4efuvE~ul88~K#)AZj3Uu*Vjufqeq3UiNTFHCpRbL3^nF8mqC}{cj#v zlugsj5e3jCl&xQL^1H=xC6U*_JAudW?|Afpg+sL!vQ@9|=zXwBpoSOPUbn>XQ4FSW$G3Mt7Bl|P-*ID)}U@ldQq?^@3zC#;6UgN zK!*U`_BY7KfHF6gD)vwg)_ZS&acm_zsv34jP+CrtHI}tMs15ob^yKf47f|&Qw!rL2 zY8!pd1YMeCY|wI{K%?bilq$BMXzYFQ2+H*k8!@rmD*%4ygNUJ^{vzdLMt+%=sFzjd zJ}B-~_BCVcfgMP?S+#B&xw{^}Y;eAYSBdP4BVPXiD)--v5H6v-60t|!)ZgiW#eBEC zT7x6Vo3omji7!=wFC!Mr6rI)k~i3%dyC)cfW-+s{j`4PWAFQE-%@9m!OK%M-Yw6qR0UY)Y^%lKnL9I?}#ZOk%;mTK_s0e ztAk_Z_s3dDqsx>*VZxTzN<}xr#{G@_#`u#JEtr}XjcKi;j>g{J+wn((#xIK_njs9V zt^;1Mbc!|4BiDcV#jl6>ZfC_J%nuF|PcB#j0FiX*BmyeQ2G1j7?TUtuOuS4S2GDJk zp@9Os32hG#Uuw3$E2sWUqO(X4K^!}NiWflz$8GcN-85z{kd+q#&cFq@bjs zrlw^8GBME6F)*{SF|)EVv#|nM*_b)G#Q9i(BAh^G0cCy>Nf~(sc~%~<23QUzE+;R8 zf=@s|N=(X1PR=GLz$qa2|1Ez903_HbyeMO6C?o(>5)?ENl)obYIsgC_4ds7b{l9{S zj)IB-z{L8elqUwDprT=5V4(R>89nMJuM30f3g1`@PBXsbPUXYI^lm#{1cRaz$oY_82{(8P)S&X z&`6b7^=#0|g#V!{lb1DLv)K+V9{do2|J?%MqoMrs9E}8^02nm8SIQHhXA1v7ceH45 zH|JOb4M>0vkjn!*ABJFksSR9(00$7aulY!8+F58mr^gpwgyNr0t=$)N-o zsNnvbDC(Gw-o}Rql1T4~B-L3M&HwEFMwRr9n`8RpC2@3k6TyFf95()g+7=Uk;On7G zZQ;{U(lwe59pijajX1P}vezZ2Hx{q&)_orUVxW2gUP=F$@DAijYyI9L5k>3cnF9mt z5Pg;I(b%D+z^g>BD7+39i!pqY?FjPJXD4uZW=p#Gx~uw;JUG$R#4rJiatt^q-%9JU za3Rw<*tI67JzGu=Gm3l7On+59MY+3Jn<)gERZ@Az>SN4Kv9ULasN+N#<@^g+VT;b_ zB@>?H7wfVl$ux*y5iK!Vp=NFeorEenIEOtk-NNc7n zRQv}4K4B2AU50>6Ye+Ad{VWwF;^eK?}aO4oKyR{ zm!u_+K>VvQRb}qMzdTt*l}YF({S5l8&P=3p&Zt@Z-SnJ)HFjd+krzRfP5m(9XnXQ6V3uVv?pmN z>f!l5Mz7LwTU17VYO1d^Kv?>~iX!ft0Z+Q=37DJx5@f^WVgt8n)eeuOu?8TW=FnD} ztK*{i4mbN^zW1_{iN8P7Iw_{{6nVzQj$W|St z6etdSZho=L-db>?ucexJ)BEv4>gL&wxP()a52^Rvi4{!8CwG+73WAiG68IsWbbVNb zwNjjz4XHcfI`L7fkrR4QiXTow--}Ac`x57~0&?QLl~?e6t%s}7$6@Xdq&2*o9_ct( z^bvG}(CN5Xw@mE?fgP}sBI3P(V=f_)P6|VP))d2QB)Bdh1&@5G!uWz*jx8x0SNQJi z>L?(MK#~u4TbJe9zOdbmZNm@PYDvg7C`FO#aa~G_c9J}kB7r4(eK?}9Dnhec zs+3mFfUS-Kg13vTKN>X+mX_fd60Lroj+S@QPbsYUiZTtseG_XJ4~MXT$EfwA^>|EC zz|&?)2Yx(;;k87p^_! 
zJ7Edt+yuwrW@$P2(m!LK{Db3{pVgux(X#Y8o$i3ws?pQTOW)2yk|mNOrzNQ?WU03U zTj`q++h`fOu}FQZDtj;0bgx%<8_FLP9O-N__|(rKv{Lo{Dg1ki5IuBM148_YXjJgt)k>506y{lqK;a@Xg>bIV~ zJM9&Z@ySM@LZ;1DKGv%KV1H2YyX6obi`WA+Q6vVjy~p2#&T5JVCS@*QHe|}Q%aBU{ z1rRgQx?+CzH8ZRgK>;8DKnRFG(D7N$R+}uPr^!0tH@*d~!lzg%a)Sb&R}*WxW=gzs z@1xp7sgt`rg_j;_pV72xVaz)q!WD>9Lzj!_$4Ar*5yw3xU9mwnxQn?+>lAKeTZ*Hx zi#hH9KoTJ4a%A;ZU5AJ+(%m6(x0QGcb$6h@K=(#)KEsFiyGNS{hcz=-L51#uzZ+|R z@A$EqbGF$g9_~}ZSXD&sTh*)b^{#;LytQQ@SQePjQYqq4%Mii?Uuk-{ZoS}2u(p(*XHe+E^I(|J9W)@Axw*(f19iLRPJ z*SMzS$)>vfZxcj@u-?dw5=Ls^0fTGBs;sb?=x_0JmAG2=6ubj^i`rU!nZWwS;(&cK z@=OaPX6|qQG-aWXkCUfWZ(kNO{ZAp;ApnWGd{cwD7k9O*xk*(g43)ObIdsAX>97|x=%;`a>sH*`=E z^+PJvDN~-Q5iMFCB&9u4^ep;%zh0CVa$Gnf&Rb)|cIlaP|3iFlP#{U2Zp}-20y{Br ze5Wxg z9-m^=3KWFGwGWDzt<42?+BF#dJ4dN-ZE86s$0WxJkc}pSp=o8Yx(ovWgeo&bf2N0s z4Cz~9C!Dgl7`~vh=bO{W@XLS{egk%|m4+Ak^SM7wxKvpyNMKVa>2nxVmgkUG&mi`Q z5jp9Xh>FIVXsSJr)bQ3_hV(3bN-%+@E*h#@9uLXKxl3xSxn!&qoe3I`jpN zU41wl|CSt8VBar>#MQxXr=bi#*!_~e%Q`j%OWA6!d7Q7H@mOr&9VzGx5OraHvA$F6 zi{#$u88Mc-q#25O%b5M~vb5$B(<1|Nk^4!lV&3GFVsJ1+-#BEFSwvYma!{sr4DmSolWt7|yRRX0j~+I-%K1Gka1 z5ryN~Y7_1G5&bW71|D*5W{1E-HTN9s+sAI1KBtRhXEF}MpOzCLRy*IO=#o-*^R?rI zORGzlQ>3Ub%~@C{@zalsUK|OM4EQT|fr`dLA08SM;V#(x+yv?h(?3&dZa8WY&U*kH z2@Rr(R!&VqInXnh{P%}FuuBo;_k^nN(;Eqi)PSF5=VYfJ#*L{C-)=@v!;J(*!}3no zi98fz1^<4TQ8s*WnBu4Dc z{5@&h%Tuz%)7PJcOQXAN@%c}=h)>B;%$0Oy^W53thjen|a4CF_nZsT-KXODsv`@UkFK?y?qOqVWFA(L0z0c{V ziJE)-Ooj+e0vXv;CLF88q`&5cKcYG9&IST7tf8k88_Z|#E^&)(Vv`?&heno*PG<`( zGOCWYl*N66E~GXRY@)_Gm?>xO)}=(e_Aa9tnp&($@bS2W>R}1uI4(6Mu)Useu7{MMaaII#Be$v$km7ES8 zaWp=~G}wsVs-v+3=dZn%^s~#2?mDD3s&Bc*b=KNNGB06f8!zVeg^WtfLo>4+2IYDUMtJ8-*Dbv(AVGW>?+qZ`#ERSL;cD zL3jdT{gJK`-!NmdbX;;U$`^zqdNxQ_EiCs7{seAyj%`W77Ou&F8JqJNav5+fUDbkq zrqYIl5^|FYuFkye%Po+$dnit7(v1QppeR-pDGolu$?^{~udz>B%`CoEsvhLg)n2a2 z*4Ap1@;5EMjgj~$7kD%!88p4|*+KEErcWIu$RiPJVLS{s*!!EQ)bZtJ(zfrOH4gqL zcmm&(ieOsuNvAr_3}ogri10ktLt-! 
zsrfeU!1p{HUJrJ04`?XK=;Wk!CT~`pgue$sS-o|MrVT@10I#$(1;u)7E0 zGV==KP&bqaK;i`C>}>P|gy#yr7@p9t;EC-21;m4Ao*U@?5e4iJrtQ6d+=s9k^TZGbP1AZ64eeL*vTv$i3oWI- zQiw0k4MsCve~gPpS_(N_b8~Pt+HnK6h)dwM&-xuN1d%bNbK>KfZE4BRWb>L-_f8v?vz95C-FJIOYsI9k3Hl4sch+!W?>@)e zH14XZMDJcp)2Sv7`*6J$_;)pR{n;mV3n}|QR>xaa%;JPgQIWz!0(q*`QW{tn8p$B6 zJCGq)DJp_8Duao7Ak$ojjq?f$(kEZ|af2Q-^wdB6+6?%S?x-|2N(=XT0M#|<{7_ZC zDJs@Si9LiZJbYVog~h|;`U*`xwE3&Ag1GCI+qEWj~)L;+kB>Jug zxDky@{vB$Hp5-L^8-6WN&!ZA!#fRRZl@wF?A*Sxtr4jlSUm7Injk^i|TnTeHdALA7 zK$K-G4l9MmiZMb}<$N=bX^0|W0F|+%-=)R zTcWxNWF8e2anO-K%10!QI`-JS!?f0RX%D3`Adb?r^G)oZ^bhBfI|iLhV3s_ zl5aUKrw1YB<D)O+J3T(XVyKrjkcov zaSX-X1}3<|#t3vpm}4MpIQeaBQgUq(Xse~OGvXsP0lKYWV@>tWqKCX=mJksA z&MKsoSm<$%>v5qz8U68d<=Bl4=ltf1S{$X%o7a^K2BC)rzw-;Z7SI0h(nURnUHjg) zZ}n$5$Jf;2pjO^Z1kXpm8GHX*%Dm|w<9jzdLZ_I7&=4-&|R zahJ+2t8yD~NxB114ZHYgM{rb6HCv8I^KVZy27B;DTz`~JR+rV;|G@DHETavBej-)V zsfw`raW>3jPm(Eyo86z;;-ct)DOKTo>!+qQ^a_KjriT+9VSMM3eiWe%61>e82;-s% zl;Vc!mnoI-n#T(M@D59&*xGPL{O5?hV_vhyqjwXd9aGIL&-OlOMFfL!Sebx#yI!tFOIKbfQYhFcKlUiIL+b zTvLA$AZs|mFv}^;VnXZY3JMR4K-9|JFWGX^<4!%sdIuWe+wrf&8x2u+dNA(BJx8)D z^O~|ABl{uK6H=9=VIiXvoXN&mnITb!NI^yI=cD{Y(=0dczG}GX8U6Eh>I*I8G}{Cn zB>JxU#ht>ZKWuc&d@`0Awig3jqMV^|&neC%l@4LHS ze*tZV?~v8(A%sI@H@&BED-Mu7)h^$hH&@OV;FN+VA1Q z!UrfPTx7p7c=_|N0c`_nQO`gj$lY=7PU@?}Y^Nv@j!B$;ztxS1ER@G7zAxf7>2;rA z`06arYJrN3NX-@+6HL0L(BfR5R4K65tBHH30L_&IrMXGo%(6zplH9|m+)n;!r$AYK zx)zPAm9l%TE?W9$*K5_+e*qbsr!Q*ovcA3c+f<2IQ+umLUMjdZb^Phc!SB?ysi6?B znV%X|B>`{M>Dtd7?x_jBQqI6_{{nL4wJ@W=2AM(;_{G#$U&s%SkSBUCf09qAdF*@> z0SreHTbYe6jW4KjpSZ-4l6R??m-p$QOw?-`G^X7+rCu^^lBLX8Q>r5Td;972h^HD@ z7(^6ph-IdzYOa``mm0@#X`J(8Lr}Z3NTznG3P9s(M=h-;itEc6&Z40vcB$4e!=3yT z#o_bN6v0QYss{S*a}4AGW~vV@6W`+e_*6C?j8PXH;6t6%89`2n`_{>>PT$=gA#`a( zAq5wYO=}+x!`|_Ux6D(UK>dgj-TZrL#^$NwQY641DIQZ33rG1g$s7eKFSto~$(uyd z#Gj`j_1H|rSoo=HITL;+pMjc;juEK48?8lu0V7@^Dn>?VA&JF5?yp+W)WsxEK1=S% zrUdD7lHbfKN5>E&DtTBwGN1>1?e|mHK{?svsOeimI*} zhcQl$Y0;!?*y&0O6VM1r>L+;5{}E(gsJs<)OTUa!T4cn^CYNu7?y~^>&6<4k?o&f= z3VTwZJgjczXtmn!bLP2v45K+lR}{-$LS}H-lcP*E7RtUoHPMZcU@^|4?Go_4!7x$b z7u#n@KL68@EFeWgWKw?EY%aX+mfbFNgYnCVkI5dlAsRtCgRJnmj|<%B=ISOig`N+6 zmKbvuLgq_O{>ux?dLIw)s6Vn?+Oy7v7%!CP<()Os6FCVR_Mu4sp{4Z4AkK>7f4v27 z%p7>~il2qLIamQUUq{cw9houqFTuEmeHNjt^Fm!oRjxC-Mnm`Mbr_C>ir>aqut#O$ zFF$jL)eV8~UW%&LLw}g`-;;2o=Otz<$|%+LbA|?zOySnD5!{D#3s>Q|5b@hZ%r)$h zfa+=Rw-@RsEn4slJn8$|+?l=GADY^aE4!WH^dY@U&w76WaO+X|0Y}x}BB&_d@%t{U zFQ6nlanloYR6>H~Ju-v@uJAAM-f=~o(z3m$G=9Bf(RCi3d-Ki;VfSqn$KLwWlQI0m z>Lh&og+Wu39z6CxFt!0ln}>(@WH9##v%PEES61#vMAVKknFGnI!aAJN-CusiFJnOvlqyhzy>WMY!$B~uIjOHUro#{@Qie1uJNCdi(t zt#9EdcJs+3<_&mKkecX|K{~1MJo`2aGZ+4|o9x07)q5Sl*`QHuSuFZ0!g}nIsUcUjDK0P9y~$X`#FG#;i6^*Kb}*Z<=<1aif{y2RD!0fgYXp7fYJe!#ZzJJIUD(bpuWTU zJOS3#2mW4C(m8lLVOTstz+1;26Ga&{GeZ{ZV|qeHJwgM-z=HcU2iEJY-r6$LkYT+a z@AJ0;l2-Tcg^jAH42tTKYHG32D&HJ1xP+`>OIA#?wNCnjTt4U=%VT?7)J3HGERlMl zp9x;rd>F-JkvsSOAno-9=NIuV+B^(mcb80Bs6&l>?YZBen{G-O{;0i9^^mZvigjTMBwtccTOXs{NmPGKEDmmdWrqRXuH0}lS^I+ zf@(*RzPXBW=E1eXIFTJlM}|iqk0^-S(SYa82uUg(n&Dk8r>D%U^9?vB(O1Z7&Fa#c zA7dia*GZOQR_<$ zQqWznFYEn5>U`f7w{Hz-rx##u3SLmdLcZpt|DDG})(V9J$t5V^3PTRq`#6|9+o0d! 
z@GlMw8&VZ3PJSfsa}*z0r#) zu=bLK^+X%`Dzv{;;G9PJy2%wwenSgpfTI9&(SFkPPf|~88`w?S;*6x>_`wvv<7}ll znZ8NvNxQ+7 zBhMM8h@tfD&H4UaYC9I+Spn1cJdK`8USb2KFU3NGKl%9$QunZrI2@`>eYG0e*&vru zmI8y*qQCW~k!HDE#@SXOY$|n|vhU?>vCaR*M(?^X7?h^>__s}PSMvirma%L+<3y&> z$c~TP&1{l$GV&1cs(~|?7fI?uV>c*KBWm%bUwGBvk3$Ol|K_xu4Arc~#F%Q(p5qIC z9teKpmCQ%D>_miNPm!jsCOL;&wjA^zi(PD%B|{r8nJt}q9JO@AF3Y6j0?U;X0~*;i zC3{SYMWasxnNsX;MAEq5s9p?3XO!#Y@u>xKevopy@9dS za$lwO8%gVd!UzTyu#18oHOl)T8CRROwxmj%A-};GG-vEuECGA7uTA#F96aMVveOTW z?!3%!-GeW_Mb67VFf9_P6(PdF=v5eX>mk~)U%W=n#tP~eXUjDWdM7;6nX4x-OJS49 z+;(~;pOPPrf*voXd*6R?nL21-T^whTY!d!>os|~Zwgc=hZhS_Io zZhMcaCmW`@EvkB&zzjFyU*&T7Mk`B+Lb3myf=L>y@5bbUxhX8|7l7>&u7obm>w5~> zfsO{5IPe~0lp7S(e)?)w8^U?6AEI@iz6stxUJKx*OmtJ!Xf&f9eg)o-j3M{dzOUiU8m0g(%> zA%2TzU~hJUjsv4W7ur+uWM%1Jp6K{sS|4a%=|DS#A*JZJbc zuN6(OSj)eyb9qL*>UMSkp#vU?{cju4L|^@Z>8pg{b1x#UlT{xR^ewRx^djGzzbBYd z&XpahM8hwsO{nNJ#WvJZA_(Rp+EP!Thz%%e5`iQRO*7y$D_7^~fFm%n$!nD>|H75EUH&Z$!_^K4z+#Rq` z)^;YnQeAT&p-`p6!#bW3)o#wuO!jBEPRXXqFF_?0o~eOhk!4PmwUuCk;@lJF%{f$E z`09`B@^1WbAEjlTiAH&~Zu-VL<`2!A{#r) zVjpAk9cT`i)BG7+SqsexI_kFe85c@Or z$3q3%rR5tnrZz`unw-QSNiU2R1vI$^n}!>_L)NO*)wjDgT26XP-LfSNi(mAXaYTOy zvH)Gu*jyBGdLa7NrkmpVtf!%b z**sN3chaIMEh2IRP#AswiuZeKbW|F{#$8+|tsd1$c5aLX_pYJ+SoJ4z^u zG4XdL-p5|UZo;NUs5rf}VlT4N75TAJDMgS^<l`Tu5$+%l%ltNT4{kxEED>B z=$T9hU|ND>53HK|l1zwPTT;&z#T0|NMvPMhGv2U|FWF{S(6y}a1jI!PZ!2CiP86Ku z4IDb>X$AM$0KMt##Pg_DUqBzz+Fa_b`6O{ZB-4O&Wi6P{aF}49H~!;=FK?c8Ma%4B z_N-(a+9n#s2pcK37_8KybfTyE67kWXemCvRE*lgEU1 zBsCu#{V-SMoQVAFl7X-r;rEqW@D3Js%@w_047RmxpQyP&C`u=PRLCibhEE|ngU2#F zf=JK+NUHfJk?Y=Iv^n+VwuWk5UiIBb38{%#lXFh+LgYi--QyHq*K;^;H~1AS>g@6> z^C1t{Hh=xnef&O7?EzA2igduh1Z<8bH}xM=?iZVPaZOT1r49UxqX4YY(vY_7l+Uc0 zdpQ3(Xa9@v2EO7#rqiN->u0sTzEJ|HlfZ!DYZ}!x5_N4`~P=eISOfL^< z5qV_Ioa}sUj^mqaaAij5l9m!1hxc*gDwELXrAhyb1mC&@EbJC9gTqrRsPpOZGbJC58uYUJs^)ntpk_Fn@-ysR@tKUcl_0~WfP zsItv+U1A6#TW>K4w$f!w7dC&*cb25tXGV2D*PW9JRwC$ld(7b>I95M(6I(o!! zMZjkv#uV`uu(T3~7Ple<-=LI7(<_BCTSe()_EXmWdZf}0GQF7QXY@xn!&%?J4T7wI z-3uM<*)zOjU;@eD4DX0Xk3NBzg#?xLZN6iudG)D$*5K}E>~R#7D+#MG^^4{z=zI?g z70DWt20_|sEzE8aXYf`ztlGf|@9n-(WOkV(-H?9w%bKny^FzgHq?aiszVTOcayRNt z*dm13-5ST~^(EQ=a$vRSsP$d24rUe8YQB#pk&T9;8@hnlo&&6>#E%^+Owp_C2cHTi zo=WGOcJZb5{gaq3(n3tBhFatFq^VodvP#3xo z->f~jJVAQ9eQG|gZ)xzQG$^oK6uvSzSS#)xYAFgV|G@DO;3!YXH^62UEeuA_<3b}X z8xYl+e87}Vm=UpvBQP4rZ>e|P z3le_ha5yparFg*xD98q@5_L$pEOu`g;zIE;RQx=#Z$H(EE~~#nZxp1gwSo9cvZ0E7 zw0c(-NS}H4eW(2xOsPn2;DQZkC3Y=4)eIcBe+&f`{zx*TH5Af14lT=(3tXqSDKA(X z_|ombf`?Cu8jCB*Fn&tFf~@#N0FBbDMC+K(n7B?p)ntGaiK&oiDOJFKJ88KgXK>wA zIXc8P)TA@Vn%;3RFur?A-_2uBB=7!#Ddo5v$j3liJ0Ofy8j6&D$4SSgB{HrhcKB*c zQkEg?7+oma!tneXVS+4e$)1n{gpE5Z-|y2 zRXBwDNxqQHUqfwrGtM1$I;+dsi%h?a5fcK^K%d(5#3&*XL~sXG%h5X>xNL`2a`>Aa zE~@v{Q~r??&dNABT(MeEW}(xbHYezCXB5viy~t8H?i4BArQfIfk)lO`Rvj7}c(c@> zaw8fp`#+J9KlV3lZJ&dMVoe%5!{I93z&U$Ao1-Hlj)A!2NvixxB^E_rme_wBn>O6t zIEjH-nB5{fTSiA!zO!2*(@AyNEO9Lg!6u;#ks*<;FqR1O&>aHq5 z`b(FGJLd-&9m{_THu{pl=OE)VOYK9U0~52_&zXTZp1Fq>D>c&E((!m6yn8c7dN5CR zQW~&g3WZUk3baYnhLQ~|xw>*kyT9;?dqKKssS&X%SyPvw!QTA(chd=W`8PH*8bk&6 zS=E&9AK1q^!RB`NN3lJUNr@B!GHt%c2=+EtZUidwSWa;_AGv8@Hzb-Jq-;fCQbd3xf^I`&3TNaQ^ z4OB&KY$TU^GpIdq%{WKueUM-_X*!^s|e;2f3bt(3kV^ZbN>=J7aEfDOj#F;3(c|DJW z1=mr3IddSNeVNE6#{PtxDjA{0#C7|40_tsGumFLv?<*^XmjoCx6V4x^?#_1`OMMl* zp}rQj8Ut(-jkph zTb=W7+?^ART^Kn#vSk-Y1bM`e)f`kc?`^#=vd#-oWhZ$8zcJ8z@r}_@n-+Gj5#!d! 
z&&+$U_UW}MJGhSjKEj3SrD2dyF#8|Q*qMkJ`XV2o0)Sm6c0Dt?ZJ=F1u2nH(c@d3o z^s(lOmaLRGp|cTxmB>;=d~Qe!ZKXA6tlE@{5vyTRBO($odU3=k$gp6i0!NLh5U1kr z7uI82qIh9ri2?lwEH2OY+%17Xnws_bre8ligB`dhVsNd_25U!buy=*(Q4zqtmKc$= z=%7*kiDdH@PT?toz!aaKG5PH&X*ZG;tZ-zcm06vF$FHBs1&GgF#*qYt8GZ&4Ew9Lv z(Z$xZnvC8Bo%A@eJ~7EE%3JJrshOJNhGPB&v~=|PgCrx5u#0BhRwA>)6=5Icpfp#+ z@=hr3WGm_X?-|UQM$ohHN8H67EV$v#gysK?D!}BKv#8N-_wUw`y7g^#ab~n$iMxRH z96r)?G<1X@lip)(yckG^Fs5Q-q5R?_ZYq{Z0rIGWUa!0Wy`x6@`~-&|I!!JNbsVI_ zIE1I3|EjfH!-70&a=Vqk;{$@Z{Xpl&xL?&|-g9z=O5^=>BdBi8j`I%moWG?=JSus~ z=0RqQ$Ez;1?HHP4jmGA8TKsf$u?Wk#2XZ34kN^zLO|p}&;f8%RLbpsPcE(KCdR@CKMp1>+ujII3Nwr|zkalYMQFA8EM>uTglpAj^1? z%Ikp+3-ZrP+ruPR%-c8Ct;Alqu(!b^^|RE}pE8wk_PDI218od6M`rZiWY6l3?R!>JHbLm3j!&q!c~_Dt^>~-2 zz{}2E!0FA>ZL=gty|g7|TF<>uk=QI-IOyGvV@K*9-o#?EI>7qL;3hi9|A|4L76*kt z235#qnP^0(&*N^)dA*y}@z*Yq^CQZX$v_3#$fM#1@bNm;K53OWY%$lkP)s5lPmSs) z>arKSw~49Ht|YS)(dTEUQK*8&#zefxwR&6~?qV1$yD!2QwO(kW?xyi_HX;+X*njZQ z%&ai7fH0}xw4vbE8;O;j_?0?oSlt3%6cRzf!O!@$AT?nukQLQw-7}c|PSOfqJ!2%= zLto6vPe2^(>EqD*4qqePQFfG+SU%c0JuJ{4bHqmjU85byv|TY&Zqb3hbf`{$r1n16 zv9QI7+bqD@sPsK&>R$kMn_DKAlBs<JbpCpLMyn<*+!9li9^co85@;~Cj zFDjOA*w0zLWxxn?QNiW<2~z%Hk06OGM^Y6*lbt7T#jVI@Kpf1{Ge0a*QRhwy))zjP z?tQ>!{LHnh%cJ9zZ`p4#;5+Wr=P-7ELpk^t@KFx}gM0hbCTlzlbHOeXw-_mN0RCZ4 zNgjJzp#zz$mcMj)^ggWPM{8TG)Dkg@-z(VXBKNrU9sH?(u-mMm%QROmGPhph7{nlf zgxf&1A6S9o z9t!ECa^4rO5VP-$L^bk6yW^2324WVKawa;scR*dK5H!kF-(1tEh1`RzBL6IOL!ey^ zV>GCO%xCK9iE3hLX9eU%EQM$)F<#GLkOrd?l2B!TM?QFr3Np_oysW4+tZCCIE2#U{ zw{9K&l<^G@3TW1IEA6mx0J*SZbbJjnnToQH{)i$5+d;;=82Zss>o{f_T74}Cy|7^$ zizcUUx#6;gGcl1tPL5Asl`kFv8H$x8;_11APHr+dXOpoiiD21-F_K4Xl3#bGoP;@W zBjQJOPB4ipqfBvvb~kBiMrk!1jYwxmzn*ih0Vh2gI%x%E{FB=%B2Nt56@|hoM!uKG z*Nn87rk5)tCn<*dXyv31tdG@Jl4w7masM2(c}Mmc`)hEW0GpquawPzR*fNRO$$Pqp z6>;p&xo=2<(@|NbNsei96nDt@o{h(hmrumP`iblnRdUyQG~Oe%cuGlaXE-|5I9A83 zJ41*DFS&tVY1Fj5yktP;V0nSRnu1HkpFu;45H-6Tq>m3&E5J+u!$cTUMbM0S>Megr z@Lxdg*o~oMg=cab4sRy=WvZ3v&)C#~JLgSzIv1^V6j26|W&HAM&t3?0)L&JnL`Z1~ zRk!3*G9BG}%bzlbjh~q|5?Ho{`awYZjQj98S7kF0eLclNL~#~*i;`XXw6PQ+#Z3w? 
z4WMNsUVrIFnZ6#IBBnd4;LXri=~`nnfX0Q6WckNqjg}k4_hW`(e%;xHhF}mM8MM|U zSoa-jM?I`_9=ot<9Ne(Xnas23O3e6f#{!KJ*f03|R9y$njguzwi z%yzOYfYbT=2?kHE*S~~p>?85kYpml5IJmqbZ93EWm#m2~BT zbA;c5_wGtVx8E;gTNSCHLDR@U#RfpSv#`);jq9VSUTJ=YU%bn6|LgGW15iBdTN(#ThGbo)`B z5OTtEQ~Su^&w{fx^%)4afN5)@Hct;nl4*q+4KUl793L+is*SOLJHb+9)!Dey12$Aj zC<}j@eX*mjYn-=ks*ywppik<1N1XB*lhp}SN?7%;2M$v-LUEOs%`v~`3A=qytAW1t zshd4`2(!&|1B#;b1-~ivZQGp{PVt+kNIMq(8N>eFv{<3#yE=aDNnZ)bs}|m0i3S>_ zdsvAk4;3iS`?E4^_sds^2?^YxZn1Az+V-g+x)%7 za~#Tjs?Khw6*~BeLDp1s9g@`Y?8X~tv8#=VAC2<9-biOefbFTcE zXlqKU@weNZ41j^^)6CB7vPt{4HMUjS_s1^35np9@uvf(GtO)cpFBG=B#Aq?XeP4x` zPkqwe!(~kp`znWTCztUyUertJ@c@O^oJEwXl`%$eLjPZ{EB+9Hh5x`^g?^&33>NEM zPRA5?E{a3}I_J6$S&wuj<2<|VIqlLhB3Acmx%WaQ78&<$Aa~OzJ$(v4LowrIh)!Bf zibZunVrKs^TtuLCYV=6e8|7!MSb|0dAAb3mzFm4Y3R%Kj!p%9(FW^DS>RYaQvXaY0 z+wlu}pNL;PybK`;IV=+K{dyTxIuMWltUqGB?zYtciH4}pOG;joPn-TjF%Y7Dcs`*S z0cGXp*Ic}ccs#+rv}a!5WA35~pO>7hZbdIqcx@k@vp2jvh8pXG#j9H|<71Fvv0Os@ z^O&_(0Cm%~G)MOXQ8MMKE-DVofG+#%mX8p%ALn*FeaIqv1b5QFk-wDp5~WB^#x9CM zOZg;}Ml4vDlfUeX`fZ3tIf`cNm}xz=;ed3rG)#%yG=D%`8`BlPC?I4=J1Zkls; zX^T;{u%ronPL}z}OA}-w27~J2%CNUyam12vQh0*`ZuBU95;}GM0%&VK`2BDSz0dqb zQO5)Tnk%9=te{1#IFrIXwD8w>znP*`$%`7pavOXj*jc;F23OkO4*n}Z(Aqvq!Td>u zh6_a5>5x9;)8wc;jW*}*5z(#Tj2F;){K2Ux1%APl{&S3XbR9CpivEWi4(n@@-Q9&r z%G^@;$gki34Qj}-6|H^M3npcksaaqMDgEXCj#+O2MJe^OVDhQFrn*{1Ykwk8u*`2FE=F=Lyz)SR1ZvvjTF(lz{62h7ru_Dp@z zZq*XW6ZiNF=+S^Np6M;MeRxYq_dd_-!OdfJf38D2q(T!H9sxy>FtO(r`$82>I;1Rd z!yoSZgRxwo4DXF>3xGw!}NcvE| zH_RI)cZjwrp`%I#MxpB)H|zXb0_m#9l9Vo`zoi&)T-;KA1ZOcf{L$A&<0}dLb_tSJ zB{wkAGQeo!(XYqF{dCEiAkDD_o*H~oESo2|2)_G+Vg;-S2%JSNyXn6s3onIrK5k>%OG-yS$vvL2}6L-ctR{O*X?2^2yn=w(s zHYW|u%9EWYEQh^Ogs|}57a~e43Q_y6gcs~NQ0u$@*;z520WxP zCx^A8hkPtL@_j3ZrTE){qz3)>iON#L+iXCVrk0Yr+t*5D?$avo<=q>Zf+o?snBp63 zfEB<}wT*O3F85Qi0V4+Q$%BaY{W^h}Cl$2BgzB$!%k*xm@36#ma|}UqTGK><{yZZ0 zc=nlZks9^Pg7iOQGEwiDnql~kl>Y;qKx4l%3a+K{t21O`Q*ZI`QBufhvx)*!ismVe zcMHd)Mm&9+Z%B74R4r?mFPAuy+>Q~|$D~F=%0&iPbPmt+C|2rpGW<;>ZQq~)OdRWp zZBMKxT1((NY^P{cgo?F#n39*NG3wJrQtOtqkwC;AHS&&}U7|h=yFe5gn5}veGTAm| zxl*;S&ls9ig2ycOZuGDe4Ek?=d2xm0N2p;?fQSA->e6|}K=Llu@`zl>3; zS1k#7q%kgS(g8QtemTeplZ~s=D4u8}xrIx_oty?3>zRnnQqgEUvk$it;TanF2K9<8PDoimbdf?TJgFP9x}#Ir;idzwaQP77 zXbCk3htI@Ek}gr9lcsFgrVGEQVEOUs9#1U2@&5pHbi{64_6!MQR<>(-J z3(*T9iLEa6r>qbmsd9wHBw&(A+$)WFMuN!FcsG)OkP|F(Z2fr0erV$Tv6CTpN|&-5 z{zROit!UIU*>c>J1_TFDH~=aKR*r6I4-{-kP$VUpwv=i~ITPVX=gl@*X&~Vt>tC2e zduIa&GrraVJH7Mk0A|W5X&QsP6Aq-1lv3y?2H%(Cgde=q>Tb7^#RQ~c4n#tI?xn$Km&>V`3STk(6)-nK@|xkz9}5@2QMgTU%d>nFaUC900-y{ zqG_F2bNs+^O;UIN005zx7R&V^ErL=Dk*PTdlS~bvdwMS@sFQSJbQ%rt(bX6wir9c1 zjVP3X(}~2vYVZO1D<9l(f|9NoljdS?HAAA4mQ<;LVufByW(dpXn!_}g^MgE#nW1V0 zR3Mjp&!EQ|2%MJSnmpN6u87UNCT)(NTEV8lW*FY4bQfO6H&}2G4 zScIL*mXT;;Kd{pj0B4|0NZSUJjA_Gv z2uV&bU8D`VH_VYHPzd^!vNygY_{W^2`6Y5KT~O)}Qk0?Ij-%qkH#Ld8yB?`fIr9G0 z&>UMKpx$BQFY;nF;Q^ex%`GEj8n$ilQVL2wwhO;X2|i`hbv0EVcJFw}URc*erA6_s zXAV%wAT~j@u?RqOnbVK77sqR}2X6#VS-L?}Co*w7G~ggYlft$MJRN2ilEEb?)Z%c; zHzTKQ% zjX6nVi=$^($9dK``5m*a3XDG1nkWmF#ydWa4cj^XqDtt-oU{G()cmX& zcJ?*{NtxZ?62^t8TGw%oE^nuNzl)-=pHSl8qEvZ&5g+t-1s6ko%59k)y=zkelCh;r z5YM<18hlHRDmP^~pJ!*Cw65}j&krb6Qdj_!iQYXSiIxf2;-lpQ$nQiP6e&^a@I^yi zmHz;dAmmH)1|N^|MwjBxMujiz9PsgvLeaz&N+5+TLlH-(D4V6BvS`RdtxCX6CmI$9 zL55xt%QRIQ(`0R_ut)=tueV6&oAmW8mP@KxO4yt*=@JXWM6PEx8btXyI;;+|4pNe* zrABr4sD$I@QjSe1P*j2(Fr{OqHYI{xp^&2DsUo=$KbWPvroa3K46fWMnR0BarNb1n zhhpq;VBSVLxZ8`OGpCcz6iiC*CW_A%r<8R@R)l#v)oQc?mDNqTEI2TJu##zLWE#iX z^A!E*i@LMvQtnHgf%rg+6A|>CJw|D0TSE?a!p;l;5ejbk>Mme5>#2qf1VtpTR;UZ 
zYDsZS#znBlC(wAodDus!%nk~U(=n*ulk$n@LBC>cgWzPEvHi)gUD|k}ic^%&3~4B;f*G)1bvgYDuZ8NSRPHVeAZP9ouSkX7LE=7W5;_k`cprzzCQcii8inOT|yLQ6MPI!}t%^?`btkFUSy@x~&Uzt%%uXtN@j$-)$y)R6Z4k1O3YefKqcfNc+3w#1)W0Ctuo)6%DNljW(@(UBF^q1}oTWok*Mb446mUU1 ztBg_=VzQESRXD@E0JO>n0m#$vVS8S~Mqg`M4fZZsEnQC0^p8L4KRg{=Sc#ffvNZ~R zMZR;0c^|+trw=$`5JVgkkuLydMBk(OHvh;YVHO;*R8lMYF8+DCCAKF~r{Ek#0rxBy})UN01ko0Cyuw34EXZENCbSz<|IM=BLpMX}2 z016`EBw}5Uyd-4U)>R0ekjR`$e@d(CSSk+e4oHOA7580k4eJ%R{zBUD6v;|ufCvhD z-ns)b#`_bjU)lV=^aMFV%@Skz6=w3uzfP%1b0L zF12KlglvZaCEF@emJW7^$4q_**k%ltd3Ad`C{~fIxKL**E>22w?+e#BLbqis{hNG)b;cs|p9p#K#wxESNwadxWfFj(ymiD6 zK^!xakyU}2EGlYs_7N%?#qASP_nPzIPJ4#6ilyL2)!59t-%{DzkoJV7;CseF&YD@l zWQv_s*a#~Qyo-LuYOQzFRmij%d>`X;hM*ReAeDdsR)a1lcbq+VK))xl z{*tn$V~wH^mc08UxO%&jP82vtH`F?$igA36cG5|gM3oVcRoXDmq;I8pXkKbO)Y2O$o#`JlF+$qd;4Gy?gDixZwQuT_G@Si>2jGv z=_Bq-v+z*L0*@qJ-UhVH(`&rxH&!q#={x*mra2X=3ucx{*stwYJzlZ9b=V$5g%d)h z47RKALp51Ix1$rar8Mf0K@UUoba|YalRBgLp+z04;BkqM@1_)SH zotwWI-_9_phnBWb)rXjXA12(?Ae+}p%U}&Unh5WUPAKPlDKP}3?3AS96SLK7YH)>U z$ZNAuP##$uVJKmEhjs)LjSDvDSE^%PPf;R$5J9}0!2|@Ul1)h7GdbrRvR(?IwURZ* z3Z!b|;tzl&L>`OuRRw#0F3NNWLJLms-s@KYk; z0VI-K?gioYs7j^~OH!)0N`FbukFbQl5e<8n3l&(wzN3(DwguJ^v9VD!thu5Ar@4x? z)l%50_J0C)smio6CH*eSIdeJT^p1K%!V8bpF}H*`WFRCVKT2w)9LxnLx3&lDjPBI8 zi#)600L*)+-|6~6*0l2lz}ok^+c zbA!@M){3nK$Q0G|-w8n_6029i>5dNtBNx6vAtr^Ne;#mX#pO>Rt*8iX{eEK^KPG0! z^z`LRa+eG)k~o6J`O*ZUn#eAV&fK!jSsmQK17TcgkRCA(4p>4o_BSkBViJF+rj!*< zo?+VVFlVhaIAX4#;H zd~%L<8&5$eN>LRFFPl;i6w`QMcD7+#yE(RkTSW?v4U~F3bFxfDQbU5EM+ykLNsD8t zTd-JgS(Te_5S3bBqt08@FJ1(TZ&JRx^4~D^W_E6%^MG z)ST%@&lsGd(CMaWS(vx-BgOL0dh(lU#9 z3v@2KQCo_ty-Ye*3gv@a2Udbfvn-Yg0qF}aNCiegrglIsR!WXc0EPbmQ1ul!GCo`K2Omla=A^Op2 zc5{iT(3mP$q-qGMC+<@X6G(K`8lRIo(1mx75)y#hM8D5H(OAhOBUbPkAcZ=V&GU2y zz6^s)nh9{_v-X8)jb)W+X9Tq#nDvd(uy&zU+H+G~$Qt-RoL8nWCvS4;war)386t(q z{D>cJ5!l_If{U6yFX z>1bZm%FMQ2R@0x`mlLlKIB&G;0PXHp*V+nEyDcQ{9;YZgd=Z9OoSe0w5^Sqc{_^&5 z?$L)89Qo23QcGJ3bcYuyWF$%#1^{5g0(YM{mw2t^jZJ~HTP*1}RXG4B9k6S{GMibW zbhFO?091CIslr^T4!eYvEr=Ch!(K3{N>15Slu?~FZQvMxB9xAaPtuSQAnc$My@#3S z?+vJ%$h?s1t#dohLOIaH%vmIzK6%4cIx$6QsfLynd1eb~MbZ*7#PH9~1hGiSf3ng$ zvwcJWSO9A5?tXp|vmVObXt_X=O){aTvBe%IG3P25O|TT+UE!9w#vwqp# z=v5MvF-?mfJR*_p@Q~OixHu7ZxOmmPq5k4e+*cOMB5mK&2BgMr&_6*JYC=I=!cvj< z91G+#FcNZ9kZ@!k*cXDOq@^I1zfz&9K|3k&j;Qdqy+YkQ4q0vU7J|8eq&)z74NV>= zC?x3y{mV+2m9B)H;m8Nx15E~BeFi$KH*9H1p8Yl(7|kni1&0r)js!g^EAM$HPLX#|9%sQ#r_jxk58 zY@LumNuw1Ufb@*LOW+tq=LQaJoop`2dD*E&iQdScS~Ap{b5wh-;lQD4h4 z<6q<}Kl*_nkcn+h6aq5iX=R42QfKunz{r#le2SJOhXaY-z9T5bvWhfkTJ3XYl9}_= zv=6q8!GjOtDIO@K*n2TxfP|N+J_qCc5Z!48?36GCo~JD#;*=GER8x97B*l%a2PqD8bkaH@#MhE8aquEol5PRL!19jdq?hD# zwwusd2PIG(2`}S`faK^MRhodf0I}huay8_wkQcl01FG4IC_O1!as0uHy_v6J3q_cR z)5olJN0l}5O~sy9`hg%$QAlS1w(!x+bS)VHG>ia}z`u(I$4tD020GX|zgD82Bl8&n z?3Em}{{ZxT;D90lD7%-{gew^#^E@V6(s3(E>0Ek2OkX2CnoZJA()x(%;(nG!S!hB` z_L!-m>Z8TKf?^%1*?zdA(Mz-52~brI2gW*=G0fvZ(_u-)O5i`351RZ1N91BxXUPJO ztxv~^zDel@qeD_`XnY%q$E0&|Pm$p56ICg&10=pD zd7$4KTx~03S(Ry+nbU>UDslTzpsT52!O15&W4bHGx<>y1+*fRwX_$$dNCsiUWH8_c zOw}eGD~?_O-?4dxYgOr=X??a}+}q|bYB5x>EwDC@-jtJBs$NYlqdYKAM~PZmLv(CW zU(_qYRHURF)LfsOAoP^7#!-qWzR|P;dSukg&|L)o0M(#0mI9I}Kd3DM+Hp|dELXxO z)*}trf2w8$ZJ{a}gs~l5c}D4A>qeccw~;Y~o6A!BZp-V9dFgnWTS>JYYKZG8D8W0n}Ebqvz_-u5O1)#N=BxsKG6X6+8Dt<12a4^-Ba9 zjGNi2iG4MTHk3i+Ss;z7xxltR#B#PdDXVei4VQoyn603cf~8>I59iVzPMS!=)_%+? 
zV=(d<4te~>D;_NR{{W(He#X6qUC>0J5S-M^?G=|C7XJXEeV9Zp@gwRPsMYvJlIpKt z2n0=*fUn>jZB0BA8%cKmyX0H-OG$rqP^(I8<@NC{&&K|B32 zL3IU9UXPym-J8}CgV}6LS*ntneJ%hWn3pFaiu9r58#gWL0VUn=4UBnwDfYq9%GG4a zn#I8!@bxk1EbQZIv|3%2L@+JHIO-sFh*V#aXC~;b$v7OH;K$$(0i~_OEZrwCXnmtK zS&6IvHVPI^X<8+dsHn%WedYXvQMHv{Vw`zG+eIgPr zo@n%c)0XtyGNye=aFXofNGY;%OLT1orT(8kUud>#WN~37f=wtiZw>G?V3SB{^u`ek zP%7(@NNT-{b)i|dvXJQ;uM)z(u|>pz*@?=vES;rOPz!jpQ|;g#gUqy*U=1Nx8b)$# z1**~#Hr#0Cydl(mftFTG)GG;UCAoN*SJ`i9x7%dG)Pb&!;dfpT|zqzkPX1hP#a5iU@b zC5S3n4qOlb1AIzq_VQ6srI)pPt2hV zgHBOZ1pfdOZocBSnNt_FI$==AIBuVa9asJT0OUH+MA<;XknpSYfwntmP1>YgKdo5u z9<}8SH08hh{F=ee2qSG}L~k_niBH)D3kixeN@>6Ng4G6&DpegRFQzc9qfrG_1Z6C%otcwc5 zpS)(zPtOX*{{T%S@Wb62mOC0Hq>%S>Q5#D-Hk-P|A;;4T^8WyWa+70j^vgZlx0o(9 zlr4r_vO`b-j_63rorDeR#_9&opBR6|Tgxy-Cg=$jFOL5J!@@?S?phCmWg|Qv?!=w~ z4`b^+?#R;{{UoIohI_GX5W(m}<7(1Qs>P?&C&}Qao*B;%pe42hvE+g~v#x zUeLdxnN!rNQG23tQC|U8H!8}NeI%z;n$@VsC`ZW9)R>=sWvb-l?7a>{hKIr*63Q3g zZBwr7s-WN+y(1{OD?o0r$X9mH-Kn<#-0K3lFJoMNj4s}k$x4z4LW3Sgh9Sz|YGt66 z9&{EU#K$;{_mX)=0T-y9!nEJ~Mlju-uPo}15J#Ye9c-c!#6CAp91LfnSL6)3mNjr+ z{!mtojSD2AsVX-?8$fC(@8cKa(yfE47rk}f8-z^(~0SnBkc5L5J^%dUbvp^stT)5@ia(-ZaW&0pJ?p_6#(jtT=2N*m`H;~D)WywfjR39)f99uZFo<0OBroEZd zU&IccWMGERbUhzG<2XenUjYWGN>)Ui`Q}V+{x$yqd;}cA+aSKJfajmgV`uzTe1u+X z(#&bg7@Nb`!sRusc`=1;m?|Zbrc0?KKS3HWl->;8Y-tuC{?BZt^f_5p5`5OT1ISkRI=Ex-=rE>jfmMr z%B40&YWt4iq%wJ}gsh1`5bdZ*$ZBEdJJE(xDhWIv+gOKcRDfL)%g{OdMw`y~Zpp9t z{{ZO0Cr|_@Xq-s#Cw7pZlfMB8DqArbmIE`s7i`7mBIph(a@TjfPA@HvFEWIJr&R+F zE)jJe-=S`ZRHiJW#t}5Kzk>sqyi7Mtq`#9`?`NCElrNV0T!5aD%ga&GXy;=+r!7tt z03cGPy>gB}r?-f-cM(j3)UH~ShjmORt zl0KWT$Jo-FAwVAOUD~q*WAl7XIK78J&9T9kX+s>8n!gdBE|3=-qnNxIw8>4Hbp*BE zOKjjnZX_6f+-qnaqKcO^CB@NtB`9xdFsNZMB$`g(tJe zg3-=hmr4cIm#YTiNc4CfKm-?d;}~JNL|k)1TvrCVPqZje$t|^w)mQvWdbs8xh4Kv~ zg}Y`Ac)zq`A@0hU#^Rph@Nd*V2g)hOD!{W|P^F?zcQsPZ0Z)b?z>=mFltjdAxIano zKbV>sgIWe64vn>Dtcz;ZEGKBGKUGfFhM4@;I=E+oGLB10moo6Yl%qGO@h8YdF2t%d z<;@NqSI>awhdg1Jhbm%CR$t4M2I$n-rK7cROq37uHS}O%RA(}sQZpvYQp>4IR<&W$ z47ifm%3i|V(;5N5k_-hDh8eQKKl?~DUTnueAl(H0IpL1%x5C9|-Gp9TwPM(6SPn7L@Uj9IGe#x; zpgg0RQ@_M?ipaASngVjy;fIe6=0I|zO)^3S&^DRMvPk!fDJuB?0FrE=0q+pV9ZvEN zXGkr#k0u#U>LH9OOG2};&49;2yNniOaaw2`Cvh#|6LL!oYGBs9)quciV(Lq-1)@rh zg7K?pBP#w3a!U)wqb*c@pOD*##P6L17&H zjxFI8jibM*J6h!1iM|SSz`i?C`&W@28v z2_*T#efY)Ey1j`iQsonNX_u(I==F+vC-Q8eyCn4#6EbpEZ~)xqX%l>PEGI){s{wA9 zd!@Jp6q=@;a)so=!!$RO$}k0nX(aJ>^zn`}se_zN-YwwpMFs=2;5)ui)5XhPmP&AMD6qDJ z{{W3Qx0vWoQv6Xqm9n#)WZzSd(l8sTj8$ zM&6BM5^T&bT>xsMUPCUB)UKGVj3g3CQxV0Vo2 zT|U9!-ti7PQW{zSKza_pK0IAh$NLIVZ-Od*RiPy#Bq#_O8Cq2KYCz@8J0WSh!OH{4htDkp9?iC|EYyokvH~4a(3}4N ztOYGp$?MUvi8v(b2eTGUf_h3q1@ND>(|8S9^MQsjl-zuW402^zDM48!)pVec&5E6e zGZcKam9c0Irdrmc-6=abkPX!63djiua3&|&{+R0oc5;@>6iU;I>QcbeoX;&|K1nnN zk-V9bmm|DU36>2Op;Dnulw$mKg}im6D=wXlA`kz0b@kF3_ey0SdHYD6qaJm>eMg0H~E){=%Y&v&fb)QW5)?iQqnC9PhOCpqy;P7M8TE9F~P74h|T@ z{mzt`s>duHjI}wRKf6m%OAqNmj7og3!ASM;EB!?<8OupZgB+?3{3!>9E-5@`+!J=p zi;eV!f6AWC7jE$X0N6sZ%NJ%Dj|esHW;G(#<*ygaJa+LYHIM0&l4xxfm43C3W*Tsh zvKHAB7NugPEB;U68A2=kQvMBWT|^8>9AReb^8WykrC?QUK%!k<1F3}n08+Gu=t^yh zDNsL449=r;b8>RId=8^bNh<#5Cj7@U(N84WOMuu@4u6;;f@TH9Y0!_zF^q9jr{x>@ z@PXuS1rlr42m)T&JBTC5`gDw7N;q!9FhU_tpO}9>aexwHK}cTxuRkwX1}iLMyB*^X zoJ0^11D);S&(!(G3BmAga40E_iOhg@M&f-jjvq|gIb^NJb-Sm1MfKok^~aR z6%FKj7lGNM1rt=qFf&Q>hmSIDpZkNTYdxTn0b)1B8%rwy>S$T4VL;ttq6rBPhNPNi zFw|BOe2z-R@(ohX1SOL~KuPfr20Xo1tQJs!oB-(EV@r`of6!I1EXs6>_4A0nNlE%g zp`;`PouuC7{Kr+gr!4yu@)0vssu~Ph6+XYSV83XBCCVU>ZhIJt!9y%fnSoMjU+NF# z6pUrHf7mb;lzydCM4xDGvS74`Rg;JwC!7@st+RIvEIIWmZX`@cj->R--%Ir;PJ6)_2fPUtu}>KRdt3d#GTd13^gK!ns}uR)Ts(=9-_A;T~oec|Hc(Wm?gZpAFgm>i&#fTt{4jLp^| 
zCRRTqS3;D^P-@_40M}j-9w`01rJr^elIpocg)EQTDxUGt*S2dvvm(isl#AWU<{d8b zz_1v3M3Q~F{{Z+9L?ufg%34iH1f_=nm))cus_^shV@t8+xF%6!no5N^93otu>(o*| z21jqrYeLvHMtg=jL-6{RY+3BdSq}C80I@y>I`+T%5EuljN@Kg{C|wu~#0$<1bOFQh zipDPN*WfbLq<~DzyPCZqN?8RW7iwM50lGCHUOo}KNviN}s0t+&916FWFp{K$bP-8x zo0EQFWf>)GDg3(Pz((R z#kz_{V`!@75CWTI{lg=n(8jX{vnUdjngTSB+b$vpp9~v~LG<`9r=r zdkNW*GEoMK1aGIWv;`Sehp`Ekpk0bj&kDxv7l{lNa?jMvYRVN!)NU{n@w93ilQENY z{Uirtz0b|U3}l`dts3@wN@lL_(kKuETu5R`yP2RI;iRiX+V=tzBm%Wv2quAnE-A}~ z^Q;O$O@NrRs3asyl=-Lvq7H-j5NJ67C9%59=87V`wc!mLgVw{nhnTCO?$MY#xyvT< zhMo1M*s*r-Iofi+wJpPd1N3N{DS0bom=cDUD`2p4Dx;g0!RX%syx5JEnzRB_E?Ppk zd}wO<6y>Ph+F;eCX%rK z0M{^8{{YBD>PXucwb|v%SN<~Xg49mabji7|Q-9FEqh3zZ zNth%T&V=*)2*3PN)7 za!u5Xl0a=Klohu%ME;bhfHyEKAyk@Fk*v#^YiWfk?kUs*3f2c~#$v_f zs#q^o-Yy1(DkY)i#~Ay9$L#U}B{P#Ql?%Iw-aKMTRsR6W-(4Rrfpvs zekn;sBTsD3nJ@~5ea9}16-{{*q`9j5Q}dl5wwMBo#)Q)_pLh-X0?tTEa7Y`@JtCZO ztjd7xl<#WbUhokq+?^qk3J_IQQ}&1cA*05U^%<}>iDB3msqhgw%EEvxRpI9dILx%9 z3Yq}tdB{Tf4v@KC)pLdbDg(gAFiSw%1ZX@dVAvHjV+5 zu;c*#1Vcw?&qE2EZm>+XK6D49bheh;CND882_T`mboPnK#eN}K7_dsHTFU|$0uCC$ z>)s&DEviyN)chgLl{y{m`WQ^A5a|VqS2J52#5NW~)rT`(7m`6yVgkizY5~LDqg{zq z0ZUlgS_@se2+FSi0G-e%I|nj)nIJMn+q^+9gk>dPBcuSynx_RogOihVRC~s!CA2mX zLuQ*TNhu(Z8IFWwSd_a*VYI=^mb>W+46CjmbBGa|lfyI7WZMc*>7%HJ%#V-Z4|oS$ zjw`f{C8mf=UL(X0e<;?4>*buj)g?hB0Vp4!05H_zVdLnhmXQ7J{?vI1R1bFVVoqf4iZgC<<=&@(V3v2dnu0p{@0;*?W<?@;f<;|{2gU~B)sJe&Y;yb5-xF@RG$iC|d`DOjq^YAa zJAO>Q_F=nQ&rJ34gp4Ghm|OKT8fwz?cF26JGQZJ_OS(tg`g}21JnRm`CCvaJ>Y@N1 zv3W*PRiF411polE-pYeG=Wd4??-T{l02)-7UMa!5rGF-9tJQParf6=Eu}WEzvZ z#K#NCRe~;54rVh6WrKD3^!Q*SFf$NX5MLO^2&9Dpl9`P%$z3twzBSQ{&zEs`S5hwu z9K(QLM`#I!fhZ{;ZYBs2M3)KyHDKI@TbwNh&a%u>0GO1dg5sbM?NCNWpz{%$4X+?B zC?zD}P%EQ;A(I0|h5+hMq-Fz%#A)-^3A`6xD%0>Fc?X73*q#H@GBX&Kl4cvO?pe6T zi&v1mAahhfU>!$wV$z&z(uGJr`%2P5m<8j2RX><$ar=T(tRdSp<1sIF1YlS*9+6n6 z?j)qE$z?4GAtqHwe0_u#=G|ChTRLr|DNF#|0sKOBl37WOk8oCbsF)NSqNb`k4bJeh z=89QHP=4T~c50?MmYIqi19f;81U5^~kf}*1QV3BXv1)+&V^~>DcnXb%nPp0(rgVT3 zF0#-l#A49VZDu0BW+)~Lq*A(y2UDT6QXSFm3CcQk12xsC_w%1vwaA-VwT`vH6GW zTc6q#Z);T7!w7px6Zt?8pbIMK28Qqo4VEtqy4BEqL?T%PB@;sdkMo=@zXNY!6(L5d zT2CNPP+=Z-{{R5LWCqr~l#-;7R6vRT-QnK@>rHE&Q-kL2^}HSy}%)Elvt@nB}u2;;rNQ-j3aBn(e^q;xTaB3aS6EMgjSvn z0zPaNk%Y`bQi6nuiUG{UU>_#Lz8>+qq=LPR+HA8YfRin0No4~-8~k0mMqPcfWq|#f zp6B~$OOWo8;Y|MkQNXrQIqHW@&h1-0>B~?j5;4LIWzi)j_2VXacoP<2)jkgk+De`OXCmc8j*keqs57%vx|1G}?UgsP-{{4IfGQr6YLP+7668uG32f&uU7=jMHSZ0<`d6ty9GQ$Lt{I}n~o zLofg^IO^k{lrr~Wq`*jg(aQNfR6`>wC@8eV;z?&Qp?V?s)3L~o{k;Ll3OZt&N=zJ$H8tip3IwE@U^K| zNZLPj?D;|^DCvdM0+pQy&QNW?%RjJn2R76bLMx~6A(nK3)Zsq%0KWGbwy|Wm{BQIQ ztRj<5BV4{|VI!N+(PDpxeWwA9opu5nI{>SGy z~M!9Pk z$00Q*=O_(~my+NM!S_K(4dkK^v>F1#8@Vi}yeXCdH5KWOab+m9Eee9LNk9kFHi@|Q z9ZG^Ce8O&$u0TJ`V023X?%qigidA}`u@xAC7>_u2kq5_SCS;PgUG#$3GbFYc#}xV8 zd3yLAM$k)DieLH^ObG#7B%Ml#A2g1(O+1s@Wg^QW>tf`J{l0a8gXA5K%qLkE6^aQU z>9Ue&`Lr*{TgcyI34&Ot&GB-~F?wZ2i&YbQF*{u@5T_)lsF&7}rmBXEvHN6@t1^|K zLvN!)Nni=&3TMi^1JKaW*O(YbPi`)>(6uW8Zk;JisZ#s03!VA-dB+oL!9Q}My-Xg;or1IuO!A$OXh3>l$A_e5!$y^Y#ugG(?9@xJa#&?o0G^T0$;jbdcEcFk zAohAwJ!usHQOz9hFm$ z5haFE>S4Qss|Gtx$l&*D{{WIuePH%CRwZt=B4UShkc5nxtDOZJ z6}H|6O11?lQxiZ5d~*+%I9AwBOu^n21MQ|hFw&jNvNBksrA=XUia0dgTs~zAqz zt4%LeOIBy*pD~Kb;@Fj%LlSK(DJ0BN5Jdt>0fjKrd5ojvQ!>+N1waQW8{Ns@0kVri z=~+n%N!;|~6KG<_YSr~Gc3uo2?!b~n8BWAMT_7_RVv~vFUKWr`T{)6?m}N-<#;mf_ zn}fy!4Y2l!VW@`cB5+NHLgbqqC!q?dqO z3uLgRNd@+aAO|9X70sA3yEU;-I>A$5B4v{R;8!!@3gp6OPDx6FRraZmgiLail;jEq zqri!nAfgGbIQqi?k)B5{QPKi4ELE@uZ>KJKK>{r>-cw>|IuD)@1}IB{txF6&3=&HlX`^X;o<2A6csN3ttl=ASWs8y_zi^xvrd>2pcctcVhK}lyV553l%3FA zs9{s)nL<%(g9=dAkFIduaxJDRxd$9`0BHs1L9vT1qGYTTN)ObAcMQfA#{o{we#Gqr 
zCP_-ca998x$#NO7{6(*@jR+#sB}uz61dwRq%T?LAbb%=*sf?5G1%05CB~sYcve3K< zO7vuf5*2ten(qR;$$@_132L#KXmX@feIUL}*g~4X)QX*Gb zMX02*OC(_67h+hNKv9IhAZ2Kz$%-|+6;HeQhxEts242AnLcnl98UQ_Dc6&Efpw?Si zN|d5%Q=sKYAC-kFGZN8CnvfM^?Y9gN|DMb7&#fb3I5$I9Oeecy(zE# zFy|EeGxis-Z}fI>#$h9Ocs(lJAUL(K*(&VB&DyiENOu>>IW{hJiP&@y_8lr2jb3It zhiV+3bR4{-3m3F@NCo@Zf8rJ#_|gN6{{a60ENeyk63PnI-vST?3U3i|~ATi7r_-PGd z9Hfxoh{jBSXlaFDnB^AaK6%C%B*hD;OMNc;$vW_1)&LkKD8Vu%FNa@f24T`g&F{0; z2a|XyISgAfj1hc*@{BbHLPfw@sx&7+FdWP)*vSh4<^&o0q_N)*;8Q|re(<(CT!?e& z1uO{Nu?3f5lmU)Ns~_SCgGroFT(KMT9*`QmnLhyp%h@<43YGxjPSGeNvoHfTbK~Y? zCaT$*3NA4qZ8)yDy*T;9CM!(3C51U9xct%tpHCKLVQf}D`2?!)4#{deQZsrQs5ko( za$M2OG;YQRH?i3zUEp#1aOh(>`N{Cyr(qYK}Vo%NSkQyVr4W1g0 zAs7^emag$ZOu;M?JV`qL01-rtQqq7$=WoOovt~tmvH{sA3W|uN(d|`IG^M6J-je#V3lv9#KUPaU8@{w4539+W|w28XeCmNr71$7 zny@e-=|GlEQ##rI0IE;@{{SAa&UOk$eYsYJtq7bDfm^pV&aaLF6)=_UYY%rQF7n)v zE`_8QFaA{L0mT_)Uc+IRH6>1H!);)1(iue=wucwlsX`sWYO|o?TLLS?bs@TgvobrI z%BZ@5g8_M=JJ!PFY{*@eAcR@ezuhrR{Hy5+E=P2YO^MH#QWRzX0Bn%P>YAN!sn1Ac z*hG_Vn{wz{m~inXy2VpzY}AJ=wskh#<)b1>wn3&f!>m#%)>{>xi?Mq&$to&ZmQ*uQ zBl+J6IOHjwL37DDYliclG0wC&6KGXfu+ksJyn{g`E$%6RH^OKc!EATMAE!VWwFa1> zkRFfdA~NK1Nt;L{j1a8UPB{Q@8bmpsTo#d$%9zS~sdAt&5oN-f>u>uti`nK7Qjl0E zIU^kNg`87epMZk4N(p5La-u7`6ScZB*+mVP-p~qA1);Eq5teqSfEpzS${0hHf#(<~ znMrj3V+YApW^;k$Z7U*Lnxo^0EeKc|JSbj*o=$Rr z!nAbMzBy6?VQRuzP2{K?1`$N?bG4CqUr-vB@n{7L-J#tWTgh^fULr6`%Asgk4-m3a z3Pm@QuG3Z-n6ic^Kp-cOcqo)Q)a22K53CtBWQ~g@kv9;hQ>X%KOd^p<>_;Yr) z#Hm0vYFy)tERwnkXIeWvDGFD4Fc1j^*!T}vWr!IEoPnS;&ai1lDnh2)GZe1wU{W?nIYV+m zQYxA1Ym5ytyo^oOGyco0|R&pkx^Em5#~m6morW1ql)TdUUevBmSOK5h>~7I{{XC1 zl!XA56)CpEw+uXEH%|ep4Q*wiB|uje0ANOtoHKG#Y-Xs>Ot7U8a0P(VlwY$;0=75W tDNUIY`?L$wL3Jpok&Xe|Eh9HUSr6DY$+H1Zc%&sEfI}zJH)he9|Je#1sI341 literal 0 HcmV?d00001 diff --git a/cnn_class2/tf_resnet.py b/cnn_class2/tf_resnet.py new file mode 100644 index 00000000..2e52998d --- /dev/null +++ b/cnn_class2/tf_resnet.py @@ -0,0 +1,260 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +# Let's go up to the end of the first conv block +# to make sure everything has been loaded correctly +# compared to keras +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt +import keras + +from keras.applications.resnet50 import ResNet50 +from keras.models import Model +from keras.preprocessing import image +from keras.layers import Dense +from keras.applications.resnet50 import preprocess_input, decode_predictions + +from tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock +from tf_resnet_identity_block import IdentityBlock +from tf_resnet_first_layers import ReLULayer, MaxPoolLayer + + +# NOTE: dependent on your Keras version +# this script used 2.1.1 +# [, +# , +# , +# , +# , +# +# ConvBlock +# IdentityBlock x 2 +# +# ConvBlock +# IdentityBlock x 3 +# +# ConvBlock +# IdentityBlock x 5 +# +# ConvBlock +# IdentityBlock x 2 +# +# AveragePooling2D +# Flatten +# Dense (Softmax) +# ] + + +# define some additional layers so they have a forward function +class AvgPool: + def __init__(self, ksize): + self.ksize = ksize + + def forward(self, X): + return tf.nn.avg_pool( + X, + ksize=[1, self.ksize, self.ksize, 1], + strides=[1, 1, 1, 1], + padding='VALID' + ) + + def get_params(self): + return [] + +class Flatten: + def forward(self, X): + return tf.contrib.layers.flatten(X) + + def get_params(self): + return [] + + 
+def custom_softmax(x): + m = tf.reduce_max(x, 1) + x = x - m + e = tf.exp(x) + return e / tf.reduce_sum(e, -1) + + +class DenseLayer: + def __init__(self, mi, mo): + self.W = tf.Variable((np.random.randn(mi, mo) * np.sqrt(2.0 / mi)).astype(np.float32)) + self.b = tf.Variable(np.zeros(mo, dtype=np.float32)) + + def forward(self, X): + # unfortunately these all yield slightly different answers + # return tf.nn.softmax(tf.matmul(X, self.W) + self.b) + # return custom_softmax(tf.matmul(X, self.W) + self.b) + # return keras.activations.softmax(tf.matmul(X, self.W) + self.b) + return tf.matmul(X, self.W) + self.b + + def copyFromKerasLayers(self, layer): + W, b = layer.get_weights() + op1 = self.W.assign(W) + op2 = self.b.assign(b) + self.session.run((op1, op2)) + + def get_params(self): + return [self.W, self.b] + + +class TFResNet: + def __init__(self): + self.layers = [ + # before conv block + ConvLayer(d=7, mi=3, mo=64, stride=2, padding='SAME'), + BatchNormLayer(64), + ReLULayer(), + MaxPoolLayer(dim=3), + # conv block + ConvBlock(mi=64, fm_sizes=[64, 64, 256], stride=1), + # identity block x 2 + IdentityBlock(mi=256, fm_sizes=[64, 64, 256]), + IdentityBlock(mi=256, fm_sizes=[64, 64, 256]), + # conv block + ConvBlock(mi=256, fm_sizes=[128, 128, 512], stride=2), + # identity block x 3 + IdentityBlock(mi=512, fm_sizes=[128, 128, 512]), + IdentityBlock(mi=512, fm_sizes=[128, 128, 512]), + IdentityBlock(mi=512, fm_sizes=[128, 128, 512]), + # conv block + ConvBlock(mi=512, fm_sizes=[256, 256, 1024], stride=2), + # identity block x 5 + IdentityBlock(mi=1024, fm_sizes=[256, 256, 1024]), + IdentityBlock(mi=1024, fm_sizes=[256, 256, 1024]), + IdentityBlock(mi=1024, fm_sizes=[256, 256, 1024]), + IdentityBlock(mi=1024, fm_sizes=[256, 256, 1024]), + IdentityBlock(mi=1024, fm_sizes=[256, 256, 1024]), + # conv block + ConvBlock(mi=1024, fm_sizes=[512, 512, 2048], stride=2), + # identity block x 2 + IdentityBlock(mi=2048, fm_sizes=[512, 512, 2048]), + IdentityBlock(mi=2048, fm_sizes=[512, 512, 2048]), + # pool / flatten / dense + AvgPool(ksize=7), + Flatten(), + DenseLayer(mi=2048, mo=1000) + ] + self.input_ = tf.placeholder(tf.float32, shape=(None, 224, 224, 3)) + self.output = self.forward(self.input_) + + def copyFromKerasLayers(self, layers): + # conv + self.layers[0].copyFromKerasLayers(layers[1]) + # bn + self.layers[1].copyFromKerasLayers(layers[2]) + # cb + self.layers[4].copyFromKerasLayers(layers[5:17]) # size=12 + # ib x 2 + self.layers[5].copyFromKerasLayers(layers[17:27]) # size=10 + self.layers[6].copyFromKerasLayers(layers[27:37]) + # cb + self.layers[7].copyFromKerasLayers(layers[37:49]) + # ib x 3 + self.layers[8].copyFromKerasLayers(layers[49:59]) + self.layers[9].copyFromKerasLayers(layers[59:69]) + self.layers[10].copyFromKerasLayers(layers[69:79]) + # cb + self.layers[11].copyFromKerasLayers(layers[79:91]) + # ib x 5 + self.layers[12].copyFromKerasLayers(layers[91:101]) + self.layers[13].copyFromKerasLayers(layers[101:111]) + self.layers[14].copyFromKerasLayers(layers[111:121]) + self.layers[15].copyFromKerasLayers(layers[121:131]) + self.layers[16].copyFromKerasLayers(layers[131:141]) + # cb + self.layers[17].copyFromKerasLayers(layers[141:153]) + # ib x 2 + self.layers[18].copyFromKerasLayers(layers[153:163]) + self.layers[19].copyFromKerasLayers(layers[163:173]) + # dense + self.layers[22].copyFromKerasLayers(layers[175]) + + + def forward(self, X): + for layer in self.layers: + X = layer.forward(X) + return X + + def predict(self, X): + assert(self.session is not None) + return 
self.session.run(
+      self.output,
+      feed_dict={self.input_: X}
+    )
+
+  def set_session(self, session):
+    self.session = session
+    for layer in self.layers:
+      if isinstance(layer, ConvBlock) or isinstance(layer, IdentityBlock):
+        layer.set_session(session)
+      else:
+        layer.session = session
+
+  def get_params(self):
+    params = []
+    for layer in self.layers:
+      params += layer.get_params()
+    return params
+
+
+if __name__ == '__main__':
+  # you can also set weights to None, it doesn't matter
+  resnet_ = ResNet50(weights='imagenet')
+
+  # make a new resnet without the softmax
+  x = resnet_.layers[-2].output
+  W, b = resnet_.layers[-1].get_weights()
+  y = Dense(1000)(x)
+  resnet = Model(resnet_.input, y)
+  resnet.layers[-1].set_weights([W, b])
+
+  # you can determine the correct layer
+  # by looking at resnet.layers in the console
+  partial_model = Model(
+    inputs=resnet.input,
+    outputs=resnet.layers[175].output
+  )
+
+  # maybe useful when building your model
+  # to look at the layers you're trying to copy
+  print(partial_model.summary())
+
+  # create an instance of our own model
+  my_partial_resnet = TFResNet()
+
+  # make a fake image
+  X = np.random.random((1, 224, 224, 3))
+
+  # get keras output
+  keras_output = partial_model.predict(X)
+
+  ### get my model output ###
+
+  # init only the variables in our net
+  init = tf.variables_initializer(my_partial_resnet.get_params())
+
+  # note: starting a new session messes up the Keras model
+  session = keras.backend.get_session()
+  my_partial_resnet.set_session(session)
+  session.run(init)
+
+  # first, just make sure we can get any output
+  first_output = my_partial_resnet.predict(X)
+  print("first_output.shape:", first_output.shape)
+
+  # copy params from Keras model
+  my_partial_resnet.copyFromKerasLayers(partial_model.layers)
+
+  # compare the 2 models
+  output = my_partial_resnet.predict(X)
+  diff = np.abs(output - keras_output).sum()
+  if diff < 1e-10:
+    print("Everything's great!")
+  else:
+    print("diff = %s" % diff)
diff --git a/cnn_class2/tf_resnet_convblock.py b/cnn_class2/tf_resnet_convblock.py
new file mode 100644
index 00000000..703d6fea
--- /dev/null
+++ b/cnn_class2/tf_resnet_convblock.py
@@ -0,0 +1,203 @@
+# https://deeplearningcourses.com/advanced-computer-vision
+# https://www.udemy.com/advanced-computer-vision
+from __future__ import print_function, division
+from builtins import range, input
+# Note: you may need to update your version of future
+# sudo pip install -U future
+
+
+import tensorflow as tf
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def init_filter(d, mi, mo, stride):
+  return (np.random.randn(d, d, mi, mo) * np.sqrt(2.0 / (d * d * mi))).astype(np.float32)
+
+
+class ConvLayer:
+  def __init__(self, d, mi, mo, stride=2, padding='VALID'):
+    self.W = tf.Variable(init_filter(d, mi, mo, stride))
+    self.b = tf.Variable(np.zeros(mo, dtype=np.float32))
+    self.stride = stride
+    self.padding = padding
+
+  def forward(self, X):
+    X = tf.nn.conv2d(
+      X,
+      self.W,
+      strides=[1, self.stride, self.stride, 1],
+      padding=self.padding
+    )
+    X = X + self.b
+    return X
+
+  def copyFromKerasLayers(self, layer):
+    # only 1 layer to copy from
+    W, b = layer.get_weights()
+    op1 = self.W.assign(W)
+    op2 = self.b.assign(b)
+    self.session.run((op1, op2))
+
+  # def copyFromWeights(self, W, b):
+  #   op1 = self.W.assign(W)
+  #   op2 = self.b.assign(b)
+  #   self.session.run((op1, op2))
+
+  def get_params(self):
+    return [self.W, self.b]
+
+
+class BatchNormLayer:
+  def __init__(self, D):
+    self.running_mean = tf.Variable(np.zeros(D, dtype=np.float32), 
trainable=False) + self.running_var = tf.Variable(np.ones(D, dtype=np.float32), trainable=False) + self.gamma = tf.Variable(np.ones(D, dtype=np.float32)) + self.beta = tf.Variable(np.zeros(D, dtype=np.float32)) + + def forward(self, X): + return tf.nn.batch_normalization( + X, + self.running_mean, + self.running_var, + self.beta, + self.gamma, + 1e-3 + ) + + def copyFromKerasLayers(self, layer): + # only 1 layer to copy from + # order: + # gamma, beta, moving mean, moving variance + gamma, beta, running_mean, running_var = layer.get_weights() + op1 = self.running_mean.assign(running_mean) + op2 = self.running_var.assign(running_var) + op3 = self.gamma.assign(gamma) + op4 = self.beta.assign(beta) + self.session.run((op1, op2, op3, op4)) + + def get_params(self): + return [self.running_mean, self.running_var, self.gamma, self.beta] + + +class ConvBlock: + def __init__(self, mi, fm_sizes, stride=2, activation=tf.nn.relu): + # conv1, conv2, conv3 + # note: # feature maps shortcut = # feauture maps conv 3 + assert(len(fm_sizes) == 3) + + # note: kernel size in 2nd conv is always 3 + # so we won't bother including it as an arg + + # note: stride only applies to conv 1 in main branch + # and conv in shortcut, otherwise stride is 1 + + self.session = None + self.f = tf.nn.relu + + # init main branch + # Conv -> BN -> F() ---> Conv -> BN -> F() ---> Conv -> BN + self.conv1 = ConvLayer(1, mi, fm_sizes[0], stride) + self.bn1 = BatchNormLayer(fm_sizes[0]) + self.conv2 = ConvLayer(3, fm_sizes[0], fm_sizes[1], 1, 'SAME') + self.bn2 = BatchNormLayer(fm_sizes[1]) + self.conv3 = ConvLayer(1, fm_sizes[1], fm_sizes[2], 1) + self.bn3 = BatchNormLayer(fm_sizes[2]) + + # init shortcut branch + # Conv -> BN + self.convs = ConvLayer(1, mi, fm_sizes[2], stride) + self.bns = BatchNormLayer(fm_sizes[2]) + + # in case needed later + self.layers = [ + self.conv1, self.bn1, + self.conv2, self.bn2, + self.conv3, self.bn3, + self.convs, self.bns + ] + + # this will not be used when input passed in from + # a previous layer + self.input_ = tf.placeholder(tf.float32, shape=(1, 224, 224, mi)) + self.output = self.forward(self.input_) + + def forward(self, X): + # main branch + FX = self.conv1.forward(X) + FX = self.bn1.forward(FX) + FX = self.f(FX) + FX = self.conv2.forward(FX) + FX = self.bn2.forward(FX) + FX = self.f(FX) + FX = self.conv3.forward(FX) + FX = self.bn3.forward(FX) + + # shortcut branch + SX = self.convs.forward(X) + SX = self.bns.forward(SX) + + return self.f(FX + SX) + + def predict(self, X): + assert(self.session is not None) + return self.session.run( + self.output, + feed_dict={self.input_: X} + ) + + def set_session(self, session): + # need to make this a session + # so assignment happens on sublayers too + self.session = session + self.conv1.session = session + self.bn1.session = session + self.conv2.session = session + self.bn2.session = session + self.conv3.session = session + self.bn3.session = session + self.convs.session = session + self.bns.session = session + + def copyFromKerasLayers(self, layers): + # [, + # , + # , + # , + # , + # , + # , + # , + # , + # , + # , + # ] + self.conv1.copyFromKerasLayers(layers[0]) + self.bn1.copyFromKerasLayers(layers[1]) + self.conv2.copyFromKerasLayers(layers[3]) + self.bn2.copyFromKerasLayers(layers[4]) + self.conv3.copyFromKerasLayers(layers[6]) + self.bn3.copyFromKerasLayers(layers[8]) + self.convs.copyFromKerasLayers(layers[7]) + self.bns.copyFromKerasLayers(layers[9]) + + def get_params(self): + params = [] + for layer in self.layers: + params += 
layer.get_params() + return params + + +if __name__ == '__main__': + conv_block = ConvBlock(mi=3, fm_sizes=[64, 64, 256], stride=1) + + # make a fake image + X = np.random.random((1, 224, 224, 3)) + + init = tf.global_variables_initializer() + with tf.Session() as session: + conv_block.set_session(session) + session.run(init) + + output = conv_block.predict(X) + print("output.shape:", output.shape) diff --git a/cnn_class2/tf_resnet_convblock_starter.py b/cnn_class2/tf_resnet_convblock_starter.py new file mode 100644 index 00000000..dc0322ff --- /dev/null +++ b/cnn_class2/tf_resnet_convblock_starter.py @@ -0,0 +1,35 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt + + +class ConvBlock: + def __init__(self): + pass + + def predict(self, X): + pass + + +if __name__ == '__main__': + conv_block = ConvBlock() + + + # make a fake image + X = np.random.random((1, 224, 224, 3)) + + init = tf.global_variables_initializer() + with tf.Session() as session: + conv_block.session = session + session.run(init) + + output = conv_block.predict(X): + print("output.shape:", output.shape) \ No newline at end of file diff --git a/cnn_class2/tf_resnet_first_layers.py b/cnn_class2/tf_resnet_first_layers.py new file mode 100644 index 00000000..79b7490b --- /dev/null +++ b/cnn_class2/tf_resnet_first_layers.py @@ -0,0 +1,155 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +# Let's go up to the end of the first conv block +# to make sure everything has been loaded correctly +# compared to keras +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt +import keras + +from keras.applications.resnet50 import ResNet50 +from keras.models import Model +from keras.preprocessing import image +from keras.applications.resnet50 import preprocess_input, decode_predictions + +from tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock + + +# NOTE: dependent on your Keras version +# this script used 2.1.1 +# [, +# , +# , +# , +# , +# , +# , +# , +# , +# , +# , +# , +# , +# , +# , +# , +# ] + + +# define some additional layers so they have a forward function +class ReLULayer: + def forward(self, X): + return tf.nn.relu(X) + + def get_params(self): + return [] + +class MaxPoolLayer: + def __init__(self, dim): + self.dim = dim + + def forward(self, X): + return tf.nn.max_pool( + X, + ksize=[1, self.dim, self.dim, 1], + strides=[1, 2, 2, 1], + padding='VALID' + ) + + def get_params(self): + return [] + +class PartialResNet: + def __init__(self): + self.layers = [ + # before conv block + ConvLayer(d=7, mi=3, mo=64, stride=2, padding='SAME'), + BatchNormLayer(64), + ReLULayer(), + MaxPoolLayer(dim=3), + # conv block + ConvBlock(mi=64, fm_sizes=[64, 64, 256], stride=1), + ] + self.input_ = tf.placeholder(tf.float32, shape=(None, 224, 224, 3)) + self.output = self.forward(self.input_) + + def copyFromKerasLayers(self, layers): + self.layers[0].copyFromKerasLayers(layers[1]) + self.layers[1].copyFromKerasLayers(layers[2]) + 
self.layers[4].copyFromKerasLayers(layers[5:])
+
+  def forward(self, X):
+    for layer in self.layers:
+      X = layer.forward(X)
+    return X
+
+  def predict(self, X):
+    assert(self.session is not None)
+    return self.session.run(
+      self.output,
+      feed_dict={self.input_: X}
+    )
+
+  def set_session(self, session):
+    self.session = session
+    self.layers[0].session = session
+    self.layers[1].session = session
+    self.layers[4].set_session(session)
+
+  def get_params(self):
+    params = []
+    for layer in self.layers:
+      params += layer.get_params()
+    return params
+
+
+if __name__ == '__main__':
+  # you can also set weights to None, it doesn't matter
+  resnet = ResNet50(weights='imagenet')
+
+  # you can determine the correct layer
+  # by looking at resnet.layers in the console
+  partial_model = Model(
+    inputs=resnet.input,
+    outputs=resnet.layers[16].output
+  )
+  print(partial_model.summary())
+  # for layer in partial_model.layers:
+  #   layer.trainable = False
+
+  my_partial_resnet = PartialResNet()
+
+  # make a fake image
+  X = np.random.random((1, 224, 224, 3))
+
+  # get keras output
+  keras_output = partial_model.predict(X)
+
+  # get my model output
+  init = tf.variables_initializer(my_partial_resnet.get_params())
+
+  # note: starting a new session messes up the Keras model
+  session = keras.backend.get_session()
+  my_partial_resnet.set_session(session)
+  session.run(init)
+
+  # first, just make sure we can get any output
+  first_output = my_partial_resnet.predict(X)
+  print("first_output.shape:", first_output.shape)
+
+  # copy params from Keras model
+  my_partial_resnet.copyFromKerasLayers(partial_model.layers)
+
+  # compare the 2 models
+  output = my_partial_resnet.predict(X)
+  diff = np.abs(output - keras_output).sum()
+  if diff < 1e-10:
+    print("Everything's great!")
+  else:
+    print("diff = %s" % diff)
diff --git a/cnn_class2/tf_resnet_first_layers_starter.py b/cnn_class2/tf_resnet_first_layers_starter.py
new file mode 100644
index 00000000..accaf4bd
--- /dev/null
+++ b/cnn_class2/tf_resnet_first_layers_starter.py
@@ -0,0 +1,91 @@
+# https://deeplearningcourses.com/advanced-computer-vision
+# https://www.udemy.com/advanced-computer-vision
+from __future__ import print_function, division
+from builtins import range, input
+# Note: you may need to update your version of future
+# sudo pip install -U future
+
+
+# Let's go up to the end of the first conv block
+# to make sure everything has been loaded correctly
+# compared to keras
+import tensorflow as tf
+import numpy as np
+import matplotlib.pyplot as plt
+import keras
+
+from keras.applications.resnet50 import ResNet50
+from keras.models import Model
+from keras.preprocessing import image
+from keras.applications.resnet50 import preprocess_input, decode_predictions
+
+from tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock
+
+
+
+class PartialResNet:
+  def __init__(self):
+    # TODO
+    pass
+
+  def copyFromKerasLayers(self, layers):
+    # TODO
+    pass
+
+  def predict(self, X):
+    # TODO
+    pass
+
+  def set_session(self, session):
+    self.session = session
+    # TODO: finish this
+
+  def get_params(self):
+    params = []
+    # TODO: finish this
+
+
+if __name__ == '__main__':
+  # you can also set weights to None, it doesn't matter
+  resnet = ResNet50(weights='imagenet')
+
+  # you can determine the correct layer
+  # by looking at resnet.layers in the console
+  partial_model = Model(
+    inputs=resnet.input,
+    outputs=resnet.layers[16].output
+  )
+  print(partial_model.summary())
+  # for layer in partial_model.layers:
+  #   layer.trainable = False
+
+  my_partial_resnet = 
PartialResNet() + + # make a fake image + X = np.random.random((1, 224, 224, 3)) + + # get keras output + keras_output = partial_model.predict(X) + + # get my model output + init = tf.variables_initializer(my_partial_resnet.get_params()) + + # note: starting a new session messes up the Keras model + session = keras.backend.get_session() + my_partial_resnet.set_session(session) + session.run(init) + + # first, just make sure we can get any output + first_output = my_partial_resnet.predict(X) + print("first_output.shape:", first_output.shape) + + # copy params from Keras model + my_partial_resnet.copyFromKerasLayers(partial_model.layers) + + # compare the 2 models + output = my_partial_resnet.predict(X) + diff = np.abs(output - keras_output).sum() + if diff < 1e-10: + print("Everything's great!") + else: + print("diff = %s" % diff) diff --git a/cnn_class2/tf_resnet_identity_block.py b/cnn_class2/tf_resnet_identity_block.py new file mode 100644 index 00000000..8583e349 --- /dev/null +++ b/cnn_class2/tf_resnet_identity_block.py @@ -0,0 +1,118 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt + +from tf_resnet_convblock import ConvLayer, BatchNormLayer + + +class IdentityBlock: + def __init__(self, mi, fm_sizes, activation=tf.nn.relu): + # conv1, conv2, conv3 + # note: # feature maps shortcut = # feauture maps conv 3 + assert(len(fm_sizes) == 3) + + # note: kernel size in 2nd conv is always 3 + # so we won't bother including it as an arg + + self.session = None + self.f = tf.nn.relu + + # init main branch + # Conv -> BN -> F() ---> Conv -> BN -> F() ---> Conv -> BN + self.conv1 = ConvLayer(1, mi, fm_sizes[0], 1) + self.bn1 = BatchNormLayer(fm_sizes[0]) + self.conv2 = ConvLayer(3, fm_sizes[0], fm_sizes[1], 1, 'SAME') + self.bn2 = BatchNormLayer(fm_sizes[1]) + self.conv3 = ConvLayer(1, fm_sizes[1], fm_sizes[2], 1) + self.bn3 = BatchNormLayer(fm_sizes[2]) + + # in case needed later + self.layers = [ + self.conv1, self.bn1, + self.conv2, self.bn2, + self.conv3, self.bn3, + ] + + # this will not be used when input passed in from + # a previous layer + self.input_ = tf.placeholder(tf.float32, shape=(1, 224, 224, mi)) + self.output = self.forward(self.input_) + + def forward(self, X): + # main branch + FX = self.conv1.forward(X) + FX = self.bn1.forward(FX) + FX = self.f(FX) + FX = self.conv2.forward(FX) + FX = self.bn2.forward(FX) + FX = self.f(FX) + FX = self.conv3.forward(FX) + FX = self.bn3.forward(FX) + + return self.f(FX + X) + + def predict(self, X): + assert(self.session is not None) + return self.session.run( + self.output, + feed_dict={self.input_: X} + ) + + def set_session(self, session): + # need to make this a session + # so assignment happens on sublayers too + self.session = session + self.conv1.session = session + self.bn1.session = session + self.conv2.session = session + self.bn2.session = session + self.conv3.session = session + self.bn3.session = session + + def copyFromKerasLayers(self, layers): + assert(len(layers) == 10) + # , + # , + # , + # , + # , + # , + # , + # , + # , + # + self.conv1.copyFromKerasLayers(layers[0]) + self.bn1.copyFromKerasLayers(layers[1]) + self.conv2.copyFromKerasLayers(layers[3]) + self.bn2.copyFromKerasLayers(layers[4]) + 
self.conv3.copyFromKerasLayers(layers[6]) + self.bn3.copyFromKerasLayers(layers[7]) + + def get_params(self): + params = [] + for layer in self.layers: + params += layer.get_params() + return params + + +if __name__ == '__main__': + identity_block = IdentityBlock(mi=256, fm_sizes=[64, 64, 256]) + + # make a fake image + X = np.random.random((1, 224, 224, 256)) + + init = tf.global_variables_initializer() + with tf.Session() as session: + identity_block.set_session(session) + session.run(init) + + output = identity_block.predict(X) + print("output.shape:", output.shape) diff --git a/cnn_class2/tf_resnet_identity_block_starter.py b/cnn_class2/tf_resnet_identity_block_starter.py new file mode 100644 index 00000000..4b822b97 --- /dev/null +++ b/cnn_class2/tf_resnet_identity_block_starter.py @@ -0,0 +1,40 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt + +from tf_resnet_convblock import ConvLayer, BatchNormLayer + + +class IdentityBlock: + def __init__(self): + # TODO + pass + + + def predict(self, X): + # TODO + pass + + + +if __name__ == '__main__': + identity_block = IdentityBlock() + + # make a fake image + X = np.random.random((1, 224, 224, 256)) + + init = tf.global_variables_initializer() + with tf.Session() as session: + identity_block.set_session(session) + session.run(init) + + output = identity_block.predict(X) + print("output.shape:", output.shape) diff --git a/cnn_class2/use_pretrained_weights_resnet.py b/cnn_class2/use_pretrained_weights_resnet.py new file mode 100644 index 00000000..1fa1f7c4 --- /dev/null +++ b/cnn_class2/use_pretrained_weights_resnet.py @@ -0,0 +1,183 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.layers import Input, Lambda, Dense, Flatten +from keras.models import Model +from keras.applications.resnet50 import ResNet50, preprocess_input +# from keras.applications.inception_v3 import InceptionV3, preprocess_input +from keras.preprocessing import image +from keras.preprocessing.image import ImageDataGenerator + +from sklearn.metrics import confusion_matrix +import numpy as np +import matplotlib.pyplot as plt + +from glob import glob + + +# re-size all the images to this +IMAGE_SIZE = [224, 224] # feel free to change depending on dataset + +# training config: +epochs = 16 +batch_size = 32 + +# https://www.kaggle.com/paultimothymooney/blood-cells +train_path = '../large_files/blood_cell_images/TRAIN' +valid_path = '../large_files/blood_cell_images/TEST' + +# https://www.kaggle.com/moltean/fruits +# train_path = '../large_files/fruits-360/Training' +# valid_path = '../large_files/fruits-360/Validation' +# train_path = '../large_files/fruits-360-small/Training' +# valid_path = '../large_files/fruits-360-small/Validation' + +# useful for getting number of files +image_files = glob(train_path + '/*/*.jp*g') +valid_image_files = glob(valid_path + '/*/*.jp*g') + +# useful for getting number of classes +folders = glob(train_path + '/*') + + +# look at an image for fun 
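+# (this assumes the blood-cell dataset from the Kaggle link above has been
+# downloaded into ../large_files; if image_files comes back empty, the
+# np.random.choice call below will raise a ValueError)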
+plt.imshow(image.load_img(np.random.choice(image_files))) +plt.show() + + +# add preprocessing layer to the front of VGG +res = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) + +# don't train existing weights +for layer in res.layers: + layer.trainable = False + +# our layers - you can add more if you want +x = Flatten()(res.output) +# x = Dense(1000, activation='relu')(x) +prediction = Dense(len(folders), activation='softmax')(x) + + +# create a model object +model = Model(inputs=res.input, outputs=prediction) + +# view the structure of the model +model.summary() + +# tell the model what cost and optimization method to use +model.compile( + loss='categorical_crossentropy', + optimizer='rmsprop', + metrics=['accuracy'] +) + + + +# create an instance of ImageDataGenerator +gen = ImageDataGenerator( + rotation_range=20, + width_shift_range=0.1, + height_shift_range=0.1, + shear_range=0.1, + zoom_range=0.2, + horizontal_flip=True, + vertical_flip=True, + preprocessing_function=preprocess_input +) + + +# test generator to see how it works and some other useful things + +# get label mapping for confusion matrix plot later +test_gen = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE) +print(test_gen.class_indices) +labels = [None] * len(test_gen.class_indices) +for k, v in test_gen.class_indices.items(): + labels[v] = k + +# should be a strangely colored image (due to VGG weights being BGR) +for x, y in test_gen: + print("min:", x[0].min(), "max:", x[0].max()) + plt.title(labels[np.argmax(y[0])]) + plt.imshow(x[0]) + plt.show() + break + + +# create generators +train_generator = gen.flow_from_directory( + train_path, + target_size=IMAGE_SIZE, + shuffle=True, + batch_size=batch_size, +) +valid_generator = gen.flow_from_directory( + valid_path, + target_size=IMAGE_SIZE, + shuffle=True, + batch_size=batch_size, +) + + +# fit the model +r = model.fit_generator( + train_generator, + validation_data=valid_generator, + epochs=epochs, + steps_per_epoch=len(image_files) // batch_size, + validation_steps=len(valid_image_files) // batch_size, +) + + + +def get_confusion_matrix(data_path, N): + # we need to see the data in the same order + # for both predictions and targets + print("Generating confusion matrix", N) + predictions = [] + targets = [] + i = 0 + for x, y in gen.flow_from_directory(data_path, target_size=IMAGE_SIZE, shuffle=False, batch_size=batch_size * 2): + i += 1 + if i % 50 == 0: + print(i) + p = model.predict(x) + p = np.argmax(p, axis=1) + y = np.argmax(y, axis=1) + predictions = np.concatenate((predictions, p)) + targets = np.concatenate((targets, y)) + if len(targets) >= N: + break + + cm = confusion_matrix(targets, predictions) + return cm + + +cm = get_confusion_matrix(train_path, len(image_files)) +print(cm) +valid_cm = get_confusion_matrix(valid_path, len(valid_image_files)) +print(valid_cm) + + +# plot some data + +# loss +plt.plot(r.history['loss'], label='train loss') +plt.plot(r.history['val_loss'], label='val loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='train acc') +plt.plot(r.history['val_acc'], label='val acc') +plt.legend() +plt.show() + +from util import plot_confusion_matrix +plot_confusion_matrix(cm, labels, title='Train confusion matrix') +plot_confusion_matrix(valid_cm, labels, title='Validation confusion matrix') \ No newline at end of file diff --git a/cnn_class2/use_pretrained_weights_vgg.py b/cnn_class2/use_pretrained_weights_vgg.py new file mode 100644 index 00000000..433a0b30 --- 
/dev/null +++ b/cnn_class2/use_pretrained_weights_vgg.py @@ -0,0 +1,182 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.layers import Input, Lambda, Dense, Flatten +from keras.models import Model +from keras.applications.vgg16 import VGG16 +from keras.applications.vgg16 import preprocess_input +from keras.preprocessing import image +from keras.preprocessing.image import ImageDataGenerator + +from sklearn.metrics import confusion_matrix +import numpy as np +import matplotlib.pyplot as plt + +from glob import glob + + +# re-size all the images to this +IMAGE_SIZE = [100, 100] # feel free to change depending on dataset + +# training config: +epochs = 5 +batch_size = 32 + +# https://www.kaggle.com/paultimothymooney/blood-cells +# train_path = '../large_files/blood_cell_images/TRAIN' +# valid_path = '../large_files/blood_cell_images/TEST' + +# https://www.kaggle.com/moltean/fruits +# train_path = '../large_files/fruits-360/Training' +# valid_path = '../large_files/fruits-360/Validation' +train_path = '../large_files/fruits-360-small/Training' +valid_path = '../large_files/fruits-360-small/Validation' + +# useful for getting number of files +image_files = glob(train_path + '/*/*.jp*g') +valid_image_files = glob(valid_path + '/*/*.jp*g') + +# useful for getting number of classes +folders = glob(train_path + '/*') + + +# look at an image for fun +plt.imshow(image.load_img(np.random.choice(image_files))) +plt.show() + + +# add preprocessing layer to the front of VGG +vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) + +# don't train existing weights +for layer in vgg.layers: + layer.trainable = False + +# our layers - you can add more if you want +x = Flatten()(vgg.output) +# x = Dense(1000, activation='relu')(x) +prediction = Dense(len(folders), activation='softmax')(x) + + +# create a model object +model = Model(inputs=vgg.input, outputs=prediction) + +# view the structure of the model +model.summary() + +# tell the model what cost and optimization method to use +model.compile( + loss='categorical_crossentropy', + optimizer='rmsprop', + metrics=['accuracy'] +) + + + +# create an instance of ImageDataGenerator +gen = ImageDataGenerator( + rotation_range=20, + width_shift_range=0.1, + height_shift_range=0.1, + shear_range=0.1, + zoom_range=0.2, + horizontal_flip=True, + vertical_flip=True, + preprocessing_function=preprocess_input +) + + +# test generator to see how it works and some other useful things + +# get label mapping for confusion matrix plot later +test_gen = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE) +print(test_gen.class_indices) +labels = [None] * len(test_gen.class_indices) +for k, v in test_gen.class_indices.items(): + labels[v] = k + +# should be a strangely colored image (due to VGG weights being BGR) +for x, y in test_gen: + print("min:", x[0].min(), "max:", x[0].max()) + plt.title(labels[np.argmax(y[0])]) + plt.imshow(x[0]) + plt.show() + break + + +# create generators +train_generator = gen.flow_from_directory( + train_path, + target_size=IMAGE_SIZE, + shuffle=True, + batch_size=batch_size, +) +valid_generator = gen.flow_from_directory( + valid_path, + target_size=IMAGE_SIZE, + shuffle=True, + batch_size=batch_size, +) + + +# fit the model +r = model.fit_generator( + train_generator, + 
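+  # note: flow_from_directory generators loop forever, so the
+  # steps_per_epoch / validation_steps arguments below are what
+  # actually bound each training / validation epoch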
validation_data=valid_generator, + epochs=epochs, + steps_per_epoch=len(image_files) // batch_size, + validation_steps=len(valid_image_files) // batch_size, +) + + + +def get_confusion_matrix(data_path, N): + # we need to see the data in the same order + # for both predictions and targets + print("Generating confusion matrix", N) + predictions = [] + targets = [] + i = 0 + for x, y in gen.flow_from_directory(data_path, target_size=IMAGE_SIZE, shuffle=False, batch_size=batch_size * 2): + i += 1 + if i % 50 == 0: + print(i) + p = model.predict(x) + p = np.argmax(p, axis=1) + y = np.argmax(y, axis=1) + predictions = np.concatenate((predictions, p)) + targets = np.concatenate((targets, y)) + if len(targets) >= N: + break + + cm = confusion_matrix(targets, predictions) + return cm + + +cm = get_confusion_matrix(train_path, len(image_files)) +print(cm) +valid_cm = get_confusion_matrix(valid_path, len(valid_image_files)) +print(valid_cm) + + +# plot some data + +# loss +plt.plot(r.history['loss'], label='train loss') +plt.plot(r.history['val_loss'], label='val loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='train acc') +plt.plot(r.history['val_acc'], label='val acc') +plt.legend() +plt.show() + +from util import plot_confusion_matrix +plot_confusion_matrix(cm, labels, title='Train confusion matrix') +plot_confusion_matrix(valid_cm, labels, title='Validation confusion matrix') \ No newline at end of file diff --git a/cnn_class2/util.py b/cnn_class2/util.py new file mode 100644 index 00000000..333e3548 --- /dev/null +++ b/cnn_class2/util.py @@ -0,0 +1,55 @@ +# https://deeplearningcourses.com/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import itertools +import numpy as np +import matplotlib.pyplot as plt + + +def plot_confusion_matrix(cm, classes, + normalize=False, + title='Confusion matrix', + cmap=plt.cm.Blues): + """ + This function prints and plots the confusion matrix. + Normalization can be applied by setting `normalize=True`. + """ + if normalize: + cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] + print("Normalized confusion matrix") + else: + print('Confusion matrix, without normalization') + + print(cm) + + plt.imshow(cm, interpolation='nearest', cmap=cmap) + plt.title(title) + plt.colorbar() + tick_marks = np.arange(len(classes)) + plt.xticks(tick_marks, classes, rotation=45) + plt.yticks(tick_marks, classes) + + fmt = '.2f' if normalize else 'd' + thresh = cm.max() / 2. 
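+    # thresh only controls the annotation colour below: cells darker than
+    # half the largest count get white text, the rest get black text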
+ for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): + plt.text(j, i, format(cm[i, j], fmt), + horizontalalignment="center", + color="white" if cm[i, j] > thresh else "black") + + plt.tight_layout() + plt.ylabel('True label') + plt.xlabel('Predicted label') + plt.show() + + +def y2indicator(Y): + K = len(set(Y)) + N = len(Y) + I = np.empty((N, K)) + I[np.arange(N), Y] = 1 + return I \ No newline at end of file From f1c1ec38699c26cbd8de1e43085ffd0a06f14586 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Sun, 18 Feb 2018 03:33:50 -0500 Subject: [PATCH 018/329] update links --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 51456ad5..5e9d9134 100644 --- a/README.md +++ b/README.md @@ -65,3 +65,5 @@ https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python Deep Learning: GANs and Variational Autoencoders https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders +Deep Learning: Advanced Computer Vision +https://deeplearningcourses.com/advanced-computer-vision \ No newline at end of file From b86e65489199d26bbc8e9c82b92b1822aec33b5f Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Sun, 18 Feb 2018 03:37:10 -0500 Subject: [PATCH 019/329] update link --- cnn_class2/fashion.py | 2 +- cnn_class2/fashion2.py | 2 +- cnn_class2/make_limited_datasets.py | 2 +- cnn_class2/ssd.py | 2 +- cnn_class2/style_transfer1.py | 2 +- cnn_class2/style_transfer2.py | 2 +- cnn_class2/style_transfer3.py | 2 +- cnn_class2/test_softmax.py | 34 +++++++++++++++++++ cnn_class2/tf_resnet.py | 2 +- cnn_class2/tf_resnet_convblock.py | 2 +- cnn_class2/tf_resnet_convblock_starter.py | 2 +- cnn_class2/tf_resnet_first_layers.py | 2 +- cnn_class2/tf_resnet_first_layers_starter.py | 2 +- cnn_class2/tf_resnet_identity_block.py | 2 +- .../tf_resnet_identity_block_starter.py | 2 +- cnn_class2/use_pretrained_weights_resnet.py | 2 +- cnn_class2/use_pretrained_weights_vgg.py | 2 +- 17 files changed, 50 insertions(+), 16 deletions(-) create mode 100644 cnn_class2/test_softmax.py diff --git a/cnn_class2/fashion.py b/cnn_class2/fashion.py index 997322db..db845f8a 100644 --- a/cnn_class2/fashion.py +++ b/cnn_class2/fashion.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division diff --git a/cnn_class2/fashion2.py b/cnn_class2/fashion2.py index a5c7452d..33231b66 100644 --- a/cnn_class2/fashion2.py +++ b/cnn_class2/fashion2.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division diff --git a/cnn_class2/make_limited_datasets.py b/cnn_class2/make_limited_datasets.py index 2b27c83a..911c4563 100644 --- a/cnn_class2/make_limited_datasets.py +++ b/cnn_class2/make_limited_datasets.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision import os diff --git a/cnn_class2/ssd.py b/cnn_class2/ssd.py index 12808b84..192b8e54 100644 --- a/cnn_class2/ssd.py +++ b/cnn_class2/ssd.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # 
https://www.udemy.com/advanced-computer-vision # simple script to adapt object detection notebook from diff --git a/cnn_class2/style_transfer1.py b/cnn_class2/style_transfer1.py index 5f5a0057..46d12d3d 100644 --- a/cnn_class2/style_transfer1.py +++ b/cnn_class2/style_transfer1.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division diff --git a/cnn_class2/style_transfer2.py b/cnn_class2/style_transfer2.py index 704bdbcd..f385ad53 100644 --- a/cnn_class2/style_transfer2.py +++ b/cnn_class2/style_transfer2.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division diff --git a/cnn_class2/style_transfer3.py b/cnn_class2/style_transfer3.py index b9620e9d..bc12d49c 100644 --- a/cnn_class2/style_transfer3.py +++ b/cnn_class2/style_transfer3.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division diff --git a/cnn_class2/test_softmax.py b/cnn_class2/test_softmax.py new file mode 100644 index 00000000..ed33fdab --- /dev/null +++ b/cnn_class2/test_softmax.py @@ -0,0 +1,34 @@ +# https://deeplearningcourses.com/c/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import tensorflow as tf +import numpy as np +import keras +import keras.backend as K + +def custom_softmax(x): + m = tf.reduce_max(x, 1) + x = x - m + e = tf.exp(x) + return e / tf.reduce_sum(e, -1) + + +a = np.random.randn(1, 1000) + +tfy = tf.nn.softmax(a) +ky = keras.activations.softmax(K.variable(a)) +tfc = custom_softmax(a) + +session = K.get_session() + +tfy_ = session.run(tfy) +ky_ = session.run(ky) +tfc_ = session.run(tfc) + +print("tf vs k", np.abs(tfy_ - ky_).sum()) +print("tf vs custom", np.abs(tfy_ - tfc_).sum()) +print("custom vs k", np.abs(tfc_ - ky_).sum()) \ No newline at end of file diff --git a/cnn_class2/tf_resnet.py b/cnn_class2/tf_resnet.py index 2e52998d..4a3c5fa2 100644 --- a/cnn_class2/tf_resnet.py +++ b/cnn_class2/tf_resnet.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/tf_resnet_convblock.py b/cnn_class2/tf_resnet_convblock.py index 703d6fea..397f160b 100644 --- a/cnn_class2/tf_resnet_convblock.py +++ b/cnn_class2/tf_resnet_convblock.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/tf_resnet_convblock_starter.py b/cnn_class2/tf_resnet_convblock_starter.py index dc0322ff..4c61019b 100644 --- a/cnn_class2/tf_resnet_convblock_starter.py +++ b/cnn_class2/tf_resnet_convblock_starter.py @@ -1,4 +1,4 @@ -# 
https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/tf_resnet_first_layers.py b/cnn_class2/tf_resnet_first_layers.py index 79b7490b..9157b65c 100644 --- a/cnn_class2/tf_resnet_first_layers.py +++ b/cnn_class2/tf_resnet_first_layers.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/tf_resnet_first_layers_starter.py b/cnn_class2/tf_resnet_first_layers_starter.py index accaf4bd..6d207358 100644 --- a/cnn_class2/tf_resnet_first_layers_starter.py +++ b/cnn_class2/tf_resnet_first_layers_starter.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/tf_resnet_identity_block.py b/cnn_class2/tf_resnet_identity_block.py index 8583e349..3e30d30c 100644 --- a/cnn_class2/tf_resnet_identity_block.py +++ b/cnn_class2/tf_resnet_identity_block.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/tf_resnet_identity_block_starter.py b/cnn_class2/tf_resnet_identity_block_starter.py index 4b822b97..87f7fff7 100644 --- a/cnn_class2/tf_resnet_identity_block_starter.py +++ b/cnn_class2/tf_resnet_identity_block_starter.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input diff --git a/cnn_class2/use_pretrained_weights_resnet.py b/cnn_class2/use_pretrained_weights_resnet.py index 1fa1f7c4..48c6bc23 100644 --- a/cnn_class2/use_pretrained_weights_resnet.py +++ b/cnn_class2/use_pretrained_weights_resnet.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division diff --git a/cnn_class2/use_pretrained_weights_vgg.py b/cnn_class2/use_pretrained_weights_vgg.py index 433a0b30..01cbd619 100644 --- a/cnn_class2/use_pretrained_weights_vgg.py +++ b/cnn_class2/use_pretrained_weights_vgg.py @@ -1,4 +1,4 @@ -# https://deeplearningcourses.com/advanced-computer-vision +# https://deeplearningcourses.com/c/advanced-computer-vision # https://www.udemy.com/advanced-computer-vision from __future__ import print_function, division from builtins import range, input From 6262c718e300506f96b835f0ef4af387bffd9eff Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Sun, 18 Feb 2018 03:37:42 -0500 Subject: [PATCH 020/329] update link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5e9d9134..9777530f 100644 --- a/README.md +++ b/README.md @@ -66,4 +66,4 @@ Deep 
Learning: GANs and Variational Autoencoders https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders Deep Learning: Advanced Computer Vision -https://deeplearningcourses.com/advanced-computer-vision \ No newline at end of file +https://deeplearningcourses.com/c/advanced-computer-vision \ No newline at end of file From 21067c07e2f1950b4d5cf2b3aa8c9c32dc635ea3 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 2 Mar 2018 23:10:42 -0500 Subject: [PATCH 021/329] add welch --- ab_testing/ex_ttest.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/ab_testing/ex_ttest.py b/ab_testing/ex_ttest.py index 1de68eee..4afd2e54 100644 --- a/ab_testing/ex_ttest.py +++ b/ab_testing/ex_ttest.py @@ -23,4 +23,23 @@ # built-in t-test: t, p = stats.ttest_ind(a, b) -print("t:\t", t, "p2:\t", p) \ No newline at end of file +print("t:\t", t, "p:\t", p) + +# welch's t-test: +t, p = stats.ttest_ind(a, b, equal_var=False) +print("Welch's t-test:") +print("t:\t", t, "p:\t", p) + +# welch's t-test manual: +N1 = len(a) +s1_sq = a.var() +N2 = len(b) +s2_sq = b.var() +t = (a.mean() - b.mean()) / np.sqrt(s1_sq / N1 + s2_sq / N2) + +nu1 = N1 - 1 +nu2 = N2 - 1 +df = (s1_sq / N1 + s2_sq / N2)**2 / ( (s1_sq*s1_sq) / (N1*N1 * nu1) + (s2_sq*s2_sq) / (N2*N2 * nu2) ) +p = (1 - stats.t.cdf(np.abs(t), df=df))*2 +print("Manual Welch t-test") +print("t:\t", t, "p:\t", p) \ No newline at end of file From bb2efa45ad4d7c69516f2edcd327fe1ef6b009b7 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 3 Mar 2018 05:16:31 -0500 Subject: [PATCH 022/329] nlp2 review --- nlp_class2/logistic.py | 137 +++++++++++++++++++++++++++++ nlp_class2/markov.py | 137 +++++++++++++++++++++++++++++ nlp_class2/neural_network.py | 141 ++++++++++++++++++++++++++++++ nlp_class2/neural_network2.py | 156 ++++++++++++++++++++++++++++++++++ 4 files changed, 571 insertions(+) create mode 100644 nlp_class2/logistic.py create mode 100644 nlp_class2/markov.py create mode 100644 nlp_class2/neural_network.py create mode 100644 nlp_class2/neural_network2.py diff --git a/nlp_class2/logistic.py b/nlp_class2/logistic.py new file mode 100644 index 00000000..352c2f57 --- /dev/null +++ b/nlp_class2/logistic.py @@ -0,0 +1,137 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +import random +from datetime import datetime + +import os +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx + +from markov import get_bigram_probs + + +if __name__ == '__main__': + # load in the data + # note: sentences are already converted to sequences of word indexes + # note: you can limit the vocab size if you run out of memory + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(2000) + # sentences, word2idx = get_sentences_with_word2idx() + + # vocab size + V = len(word2idx) + print("Vocab size:", V) + + # we will also treat beginning of sentence and end of sentence as bigrams + # START -> first word + # last word -> END + start_idx = word2idx['START'] + end_idx = word2idx['END'] + + + # a matrix 
where: + # row = last word + # col = current word + # value at [row, col] = p(current word | last word) + bigram_probs = get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=0.1) + + + # train a logistic model + W = np.random.randn(V, V) / np.sqrt(V) + + losses = [] + epochs = 1 + lr = 1e-1 + + def softmax(a): + a = a - a.max() + exp_a = np.exp(a) + return exp_a / exp_a.sum(axis=1, keepdims=True) + + # what is the loss if we set W = log(bigram_probs)? + W_bigram = np.log(bigram_probs) + bigram_losses = [] + + + t0 = datetime.now() + for epoch in range(epochs): + # shuffle sentences at each epoch + random.shuffle(sentences) + + j = 0 # keep track of iterations + for sentence in sentences: + # convert sentence into one-hot encoded inputs and targets + sentence = [start_idx] + sentence + [end_idx] + n = len(sentence) + inputs = np.zeros((n - 1, V)) + targets = np.zeros((n - 1, V)) + inputs[np.arange(n - 1), sentence[:n-1]] = 1 + targets[np.arange(n - 1), sentence[1:]] = 1 + + # get output predictions + predictions = softmax(inputs.dot(W)) + + # do a gradient descent step + W = W - lr * inputs.T.dot(predictions - targets) + + # keep track of the loss + loss = -np.sum(targets * np.log(predictions)) / (n - 1) + losses.append(loss) + + # keep track of the bigram loss + # only do it for the first epoch to avoid redundancy + if epoch == 0: + bigram_predictions = softmax(inputs.dot(W_bigram)) + bigram_loss = -np.sum(targets * np.log(bigram_predictions)) / (n - 1) + bigram_losses.append(bigram_loss) + + + if j % 10 == 0: + print("epoch:", epoch, "sentence: %s/%s" % (j, len(sentences)), "loss:", loss) + j += 1 + + print("Elapsed time training:", datetime.now() - t0) + plt.plot(losses) + + # plot a horizontal line for the bigram loss + avg_bigram_loss = np.mean(bigram_losses) + print("avg_bigram_loss:", avg_bigram_loss) + plt.axhline(y=avg_bigram_loss, color='r', linestyle='-') + + + # plot smoothed losses to reduce variability + def smoothed_loss(x, decay=0.99): + y = np.zeros(len(x)) + last = 0 + for t in range(len(x)): + z = decay * last + (1 - decay) * x[t] + y[t] = z / (1 - decay ** (t + 1)) + last = z + return y + + plt.plot(smoothed_loss(losses)) + plt.show() + + # plot W and bigram probs side-by-side + # for the most common 200 words + plt.subplot(1,2,1) + plt.title("Logistic Model") + plt.imshow(softmax(W)) + plt.subplot(1,2,2) + plt.title("Bigram Probs") + plt.imshow(bigram_probs) + plt.show() + + + + diff --git a/nlp_class2/markov.py b/nlp_class2/markov.py new file mode 100644 index 00000000..4e639e98 --- /dev/null +++ b/nlp_class2/markov.py @@ -0,0 +1,137 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np + +import os +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx + + + +def get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=1): + # structure of bigram probability matrix will be: + # (last word, current word) --> probability + # we will use add-1 smoothing + # note: we'll always ignore this from the END token + bigram_probs = np.ones((V, V)) * smoothing + for sentence in sentences: + for i in 
range(len(sentence)): + + if i == 0: + # beginning word + bigram_probs[start_idx, sentence[i]] += 1 + else: + # middle word + bigram_probs[sentence[i-1], sentence[i]] += 1 + + # if we're at the final word + # we update the bigram for last -> current + # AND current -> END token + if i == len(sentence) - 1: + # final word + bigram_probs[sentence[i], end_idx] += 1 + + # normalize the counts along the rows to get probabilities + bigram_probs /= bigram_probs.sum(axis=1, keepdims=True) + return bigram_probs + + + +if __name__ == '__main__': + # load in the data + # note: sentences are already converted to sequences of word indexes + # note: you can limit the vocab size if you run out of memory + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(10000) + # sentences, word2idx = get_sentences_with_word2idx() + + # vocab size + V = len(word2idx) + print("Vocab size:", V) + + # we will also treat beginning of sentence and end of sentence as bigrams + # START -> first word + # last word -> END + start_idx = word2idx['START'] + end_idx = word2idx['END'] + + + # a matrix where: + # row = last word + # col = current word + # value at [row, col] = p(current word | last word) + bigram_probs = get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=0.1) + + + # a function to calculate normalized log prob score + # for a sentence + def get_score(sentence): + score = 0 + for i in range(len(sentence)): + if i == 0: + # beginning word + score += np.log(bigram_probs[start_idx, sentence[i]]) + else: + # middle word + score += np.log(bigram_probs[sentence[i-1], sentence[i]]) + # final word + score += np.log(bigram_probs[sentence[-1], end_idx]) + + # normalize the score + return score / (len(sentence) + 1) + + + # a function to map word indexes back to real words + idx2word = dict((v, k) for k, v in iteritems(word2idx)) + def get_words(sentence): + return ' '.join(idx2word[i] for i in sentence) + + + # when we sample a fake sentence, we want to ensure not to sample + # start token or end token + sample_probs = np.ones(V) + sample_probs[start_idx] = 0 + sample_probs[end_idx] = 0 + sample_probs /= sample_probs.sum() + + # test our model on real and fake sentences + while True: + # real sentence + real_idx = np.random.choice(len(sentences)) + real = sentences[real_idx] + + # fake sentence + fake = np.random.choice(V, size=len(real), p=sample_probs) + + print("REAL:", get_words(real), "SCORE:", get_score(real)) + print("FAKE:", get_words(fake), "SCORE:", get_score(fake)) + + # input your own sentence + custom = input("Enter your own sentence:\n") + custom = custom.lower().split() + + # check that all tokens exist in word2idx (otherwise, we can't get score) + bad_sentence = False + for token in custom: + if token not in word2idx: + bad_sentence = True + + if bad_sentence: + print("Sorry, you entered words that are not in the vocabulary") + else: + # convert sentence into list of indexes + custom = [word2idx[token] for token in custom] + print("SCORE:", get_score(custom)) + + + cont = input("Continue? 
[Y/n]") + if cont and cont.lower() in ('N', 'n'): + break + diff --git a/nlp_class2/neural_network.py b/nlp_class2/neural_network.py new file mode 100644 index 00000000..d44c6f52 --- /dev/null +++ b/nlp_class2/neural_network.py @@ -0,0 +1,141 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +import random +from datetime import datetime + +import os +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx + +from markov import get_bigram_probs + + +if __name__ == '__main__': + # load in the data + # note: sentences are already converted to sequences of word indexes + # note: you can limit the vocab size if you run out of memory + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(2000) + # sentences, word2idx = get_sentences_with_word2idx() + + # vocab size + V = len(word2idx) + print("Vocab size:", V) + + # we will also treat beginning of sentence and end of sentence as bigrams + # START -> first word + # last word -> END + start_idx = word2idx['START'] + end_idx = word2idx['END'] + + + # a matrix where: + # row = last word + # col = current word + # value at [row, col] = p(current word | last word) + bigram_probs = get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=0.1) + + + # train a shallow neural network model + D = 100 + W1 = np.random.randn(V, D) / np.sqrt(V) + W2 = np.random.randn(D, V) / np.sqrt(D) + + losses = [] + epochs = 1 + lr = 1e-2 + + def softmax(a): + a = a - a.max() + exp_a = np.exp(a) + return exp_a / exp_a.sum(axis=1, keepdims=True) + + # what is the loss if we set W = log(bigram_probs)? 
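+    # answer: exactly the cross-entropy of the bigram model itself, since
+    # softmax(log(p)) gives back p whenever each row of p sums to 1,
+    # e.g. softmax(np.log(np.array([[0.2, 0.8]]))) recovers [[0.2, 0.8]];
+    # this provides a baseline loss the neural network should approach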
+ W_bigram = np.log(bigram_probs) + bigram_losses = [] + + t0 = datetime.now() + for epoch in range(epochs): + # shuffle sentences at each epoch + random.shuffle(sentences) + + j = 0 # keep track of iterations + for sentence in sentences: + # convert sentence into one-hot encoded inputs and targets + sentence = [start_idx] + sentence + [end_idx] + n = len(sentence) + inputs = np.zeros((n - 1, V)) + targets = np.zeros((n - 1, V)) + inputs[np.arange(n - 1), sentence[:n-1]] = 1 + targets[np.arange(n - 1), sentence[1:]] = 1 + + # get output predictions + hidden = np.tanh(inputs.dot(W1)) + predictions = softmax(hidden.dot(W2)) + + # do a gradient descent step + W2 = W2 - lr * hidden.T.dot(predictions - targets) + dhidden = (predictions - targets).dot(W2.T) * (1 - hidden * hidden) + W1 = W1 - lr * inputs.T.dot(dhidden) + + # keep track of the loss + loss = -np.sum(targets * np.log(predictions)) / (n - 1) + losses.append(loss) + + # keep track of the bigram loss + # only do it for the first epoch to avoid redundancy + if epoch == 0: + bigram_predictions = softmax(inputs.dot(W_bigram)) + bigram_loss = -np.sum(targets * np.log(bigram_predictions)) / (n - 1) + bigram_losses.append(bigram_loss) + + + if j % 10 == 0: + print("epoch:", epoch, "sentence: %s/%s" % (j, len(sentences)), "loss:", loss) + j += 1 + + print("Elapsed time training:", datetime.now() - t0) + plt.plot(losses) + + # plot a horizontal line for the bigram loss + avg_bigram_loss = np.mean(bigram_losses) + print("avg_bigram_loss:", avg_bigram_loss) + plt.axhline(y=avg_bigram_loss, color='r', linestyle='-') + + + # plot smoothed losses to reduce variability + def smoothed_loss(x, decay=0.99): + y = np.zeros(len(x)) + last = 0 + for t in range(len(x)): + z = decay * last + (1 - decay) * x[t] + y[t] = z / (1 - decay ** (t + 1)) + last = z + return y + + plt.plot(smoothed_loss(losses)) + plt.show() + + # plot W and bigram probs side-by-side + # for the most common 200 words + plt.subplot(1,2,1) + plt.title("Neural Network Model") + plt.imshow(np.tanh(W1).dot(W2)) + plt.subplot(1,2,2) + plt.title("Bigram Probs") + plt.imshow(W_bigram) + plt.show() + + + + diff --git a/nlp_class2/neural_network2.py b/nlp_class2/neural_network2.py new file mode 100644 index 00000000..1f0be410 --- /dev/null +++ b/nlp_class2/neural_network2.py @@ -0,0 +1,156 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +import random +from datetime import datetime + +import os +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx + +from markov import get_bigram_probs + + +if __name__ == '__main__': + # load in the data + # note: sentences are already converted to sequences of word indexes + # note: you can limit the vocab size if you run out of memory + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(2000) + # sentences, word2idx = get_sentences_with_word2idx() + + # vocab size + V = len(word2idx) + print("Vocab size:", V) + + # we will also treat beginning of sentence and end of sentence as bigrams + # START -> first word + 
# last word -> END + start_idx = word2idx['START'] + end_idx = word2idx['END'] + + + # a matrix where: + # row = last word + # col = current word + # value at [row, col] = p(current word | last word) + bigram_probs = get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=0.1) + + + # train a shallow neural network model + D = 100 + W1 = np.random.randn(V, D) / np.sqrt(V) + W2 = np.random.randn(D, V) / np.sqrt(D) + + losses = [] + epochs = 1 + lr = 1e-2 + + def softmax(a): + a = a - a.max() + exp_a = np.exp(a) + return exp_a / exp_a.sum(axis=1, keepdims=True) + + # what is the loss if we set W = log(bigram_probs)? + W_bigram = np.log(bigram_probs) + bigram_losses = [] + + t0 = datetime.now() + for epoch in range(epochs): + # shuffle sentences at each epoch + random.shuffle(sentences) + + j = 0 # keep track of iterations + for sentence in sentences: + # do not one-hot encoded inputs and targets + sentence = [start_idx] + sentence + [end_idx] + n = len(sentence) + inputs = sentence[:n-1] + targets = sentence[1:] + + # get output predictions + hidden = np.tanh(W1[inputs]) + predictions = softmax(hidden.dot(W2)) + + # keep track of the loss + loss = -np.sum(np.log(predictions[np.arange(n - 1), targets])) / (n - 1) + losses.append(loss) + + # do a gradient descent step + # do it after loss since the calculation of doutput will overwrite predictions + # we don't want to make a copy because it would be slow + doutput = predictions # N x V + doutput[np.arange(n - 1), targets] -= 1 + W2 = W2 - lr * hidden.T.dot(doutput) # (D x N) (N x V) + dhidden = doutput.dot(W2.T) * (1 - hidden * hidden) # (N x V) (V x D) * (N x D) + # # for reference: + # # original: W1 = W1 - lr * inputs.T.dot(dhidden) # VxN NxD --> VxD + + # test this + i = 0 + for w in inputs: # don't include end token + W1[w] = W1[w] - lr * dhidden[i] + i += 1 + + # vs this + # oh_inputs = np.zeros((n - 1, V)) + # oh_inputs[np.arange(n - 1), sentence[:n-1]] = 1 + # W1 = W1 - lr * oh_inputs.T.dot(dhidden) + + # keep track of the bigram loss + # only do it for the first epoch to avoid redundancy + if epoch == 0: + bigram_predictions = softmax(W_bigram[inputs]) + bigram_loss = -np.sum(np.log(bigram_predictions[np.arange(n - 1), targets])) / (n - 1) + bigram_losses.append(bigram_loss) + + + if j % 100 == 0: + print("epoch:", epoch, "sentence: %s/%s" % (j, len(sentences)), "loss:", loss) + j += 1 + + + print("Elapsed time training:", datetime.now() - t0) + plt.plot(losses) + + # plot a horizontal line for the bigram loss + avg_bigram_loss = np.mean(bigram_losses) + print("avg_bigram_loss:", avg_bigram_loss) + plt.axhline(y=avg_bigram_loss, color='r', linestyle='-') + + + # plot smoothed losses to reduce variability + def smoothed_loss(x, decay=0.99): + y = np.zeros(len(x)) + last = 0 + for t in range(len(x)): + z = decay * last + (1 - decay) * x[t] + y[t] = z / (1 - decay ** (t + 1)) + last = z + return y + + plt.plot(smoothed_loss(losses)) + plt.show() + + # plot W and bigram probs side-by-side + # for the most common 200 words + plt.subplot(1,2,1) + plt.title("Neural Network Model") + plt.imshow(np.tanh(W1).dot(W2)) + plt.subplot(1,2,2) + plt.title("Bigram Probs") + plt.imshow(W_bigram) + plt.show() + + + + From a30b32a1a64774963bbc5cc20726d0023a6f8ee1 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 6 Mar 2018 21:18:31 -0500 Subject: [PATCH 023/329] reparameterization trick for later versions of tf --- unsupervised_class3/vae_tf.py | 45 ++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git 
a/unsupervised_class3/vae_tf.py b/unsupervised_class3/vae_tf.py index 5c2eb9cc..d2a93b32 100644 --- a/unsupervised_class3/vae_tf.py +++ b/unsupervised_class3/vae_tf.py @@ -9,7 +9,16 @@ import numpy as np import tensorflow as tf import matplotlib.pyplot as plt -st = tf.contrib.bayesflow.stochastic_tensor + +st = None +try: + st = tf.contrib.bayesflow.stochastic_tensor +except: + # doesn't exist in later versions of TF + # we will use the reparameterization trick instead + # watch the later lecture on the reparameterization trick + # to learn about it. + pass Normal = tf.contrib.distributions.Normal Bernoulli = tf.contrib.distributions.Bernoulli @@ -73,10 +82,20 @@ def __init__(self, D, hidden_layer_sizes): # get a sample of Z # we need to use a stochastic tensor # in order for the errors to be backpropagated past this point - with st.value_type(st.SampleValue()): - self.Z = st.StochasticTensor(Normal(loc=self.means, scale=self.stddev)) - # to get back Q(Z), the distribution of Z - # we will later use self.Z.distribution + if st is None: + # doesn't exist in later versions of Tensorflow + # we'll use the same trick we use in Theano + standard_normal = Normal( + loc=np.zeros(M, dtype=np.float32), + scale=np.ones(M, dtype=np.float32) + ) + e = standard_normal.sample(tf.shape(self.means)[0]) + self.Z = e * self.stddev + self.means + else: + with st.value_type(st.SampleValue()): + self.Z = st.StochasticTensor(Normal(loc=self.means, scale=self.stddev)) + # to get back Q(Z), the distribution of Z + # we will later use self.Z.distribution # decoder @@ -139,12 +158,16 @@ def __init__(self, D, hidden_layer_sizes): # now build the cost - kl = tf.reduce_sum( - tf.contrib.distributions.kl_divergence( - self.Z.distribution, standard_normal - ), - 1 - ) + if st is None: + kl = -tf.log(self.stddev) + 0.5*(self.stddev**2 + self.means**2) - 0.5 + kl = tf.reduce_sum(kl, axis=1) + else: + kl = tf.reduce_sum( + tf.contrib.distributions.kl_divergence( + self.Z.distribution, standard_normal + ), + 1 + ) expected_log_likelihood = tf.reduce_sum( self.X_hat_distribution.log_prob(self.X), 1 From 6d450479f4567695342786054ab56786091df56d Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 26 Mar 2018 00:41:03 -0400 Subject: [PATCH 024/329] add spam2 --- nlp_class/lsa.py | 5 +++ nlp_class/sentiment.py | 4 ++ nlp_class/spam2.py | 83 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+) create mode 100644 nlp_class/spam2.py diff --git a/nlp_class/lsa.py b/nlp_class/lsa.py index 0a583b69..b513b106 100644 --- a/nlp_class/lsa.py +++ b/nlp_class/lsa.py @@ -22,6 +22,11 @@ # copy tokenizer from sentiment example stopwords = set(w.rstrip() for w in open('stopwords.txt')) + +# note: an alternative source of stopwords +# from nltk.corpus import stopwords +# stopwords.words('english') + # add more stopwords specific to this problem stopwords = stopwords.union({ 'introduction', 'edition', 'series', 'application', diff --git a/nlp_class/sentiment.py b/nlp_class/sentiment.py index 6e5edeb0..85be2ee1 100644 --- a/nlp_class/sentiment.py +++ b/nlp_class/sentiment.py @@ -27,6 +27,10 @@ # from http://www.lextek.com/manuals/onix/stopwords1.html stopwords = set(w.rstrip() for w in open('stopwords.txt')) +# note: an alternative source of stopwords +# from nltk.corpus import stopwords +# stopwords.words('english') + # load the reviews # data courtesy of http://www.cs.jhu.edu/~mdredze/datasets/sentiment/index2.html positive_reviews = BeautifulSoup(open('electronics/positive.review').read()) diff --git a/nlp_class/spam2.py 
b/nlp_class/spam2.py new file mode 100644 index 00000000..37d787cf --- /dev/null +++ b/nlp_class/spam2.py @@ -0,0 +1,83 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer +from sklearn.model_selection import train_test_split +from sklearn.naive_bayes import MultinomialNB +from wordcloud import WordCloud + + +# data from: +# https://www.kaggle.com/uciml/sms-spam-collection-dataset +# file contains some invalid chars +# depending on which version of pandas you have +# an error may be thrown +df = pd.read_csv('../large_files/spam.csv', encoding='ISO-8859-1') + +# drop unnecessary columns +df = df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1) + +# rename columns to something better +df.columns = ['labels', 'data'] + +# create binary labels +df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1}) +Y = df['b_labels'].as_matrix() + +# try multiple ways of calculating features +# tfidf = TfidfVectorizer(decode_error='ignore') +# X = tfidf.fit_transform(df['data']) + +count_vectorizer = CountVectorizer(decode_error='ignore') +X = count_vectorizer.fit_transform(df['data']) + +# split up the data +Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + +# create the model, train it, print scores +model = MultinomialNB() +model.fit(Xtrain, Ytrain) +print("train score:", model.score(Xtrain, Ytrain)) +print("test score:", model.score(Xtest, Ytest)) + + + +# visualize the data +def visualize(label): + words = '' + for msg in df[df['labels'] == label]['data']: + msg = msg.lower() + words += msg + ' ' + wordcloud = WordCloud(width=600, height=400).generate(words) + plt.imshow(wordcloud) + plt.axis('off') + plt.show() + +visualize('spam') +visualize('ham') + + +# see what we're getting wrong +df['predictions'] = model.predict(X) + +# things that should be spam +sneaky_spam = df[(df['predictions'] == 0) & (df['b_labels'] == 1)]['data'] +for msg in sneaky_spam: + print(msg) + +# things that should not be spam +not_actually_spam = df[(df['predictions'] == 1) & (df['b_labels'] == 0)]['data'] +for msg in not_actually_spam: + print(msg) + + From 6b3f8d65526488433f0d11a5e255c02146431557 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 8 Apr 2018 00:32:00 -0400 Subject: [PATCH 025/329] add comment --- unsupervised_class3/vae_tf.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsupervised_class3/vae_tf.py b/unsupervised_class3/vae_tf.py index d2a93b32..d26b0114 100644 --- a/unsupervised_class3/vae_tf.py +++ b/unsupervised_class3/vae_tf.py @@ -91,6 +91,14 @@ def __init__(self, D, hidden_layer_sizes): ) e = standard_normal.sample(tf.shape(self.means)[0]) self.Z = e * self.stddev + self.means + + # note: this also works because Tensorflow + # now does the "magic" for you + # n = Normal( + # loc=self.means, + # scale=self.stddev, + # ) + # self.Z = n.sample() else: with st.value_type(st.SampleValue()): self.Z = st.StochasticTensor(Normal(loc=self.means, scale=self.stddev)) From 232f10e7a4da11feab84d423a097b74e9c120273 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 17 Apr 2018 15:19:19 
-0400 Subject: [PATCH 026/329] oversample --- nlp_class/sentiment.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/nlp_class/sentiment.py b/nlp_class/sentiment.py index 85be2ee1..e7e2ed15 100644 --- a/nlp_class/sentiment.py +++ b/nlp_class/sentiment.py @@ -41,8 +41,14 @@ # there are more positive reviews than negative reviews # so let's take a random sample so we have balanced classes -np.random.shuffle(positive_reviews) -positive_reviews = positive_reviews[:len(negative_reviews)] +# np.random.shuffle(positive_reviews) +# positive_reviews = positive_reviews[:len(negative_reviews)] + +# we can also oversample the negative reviews +diff = len(positive_reviews) - len(negative_reviews) +idxs = np.random.choice(len(negative_reviews), size=diff) +extra = [negative_reviews[i] for i in idxs] +negative_reviews += extra # first let's just try to tokenize the text using nltk's tokenizer # let's take the first review for example: From c3a4b3c0397eff1082e759f3d6395f315aaf1be8 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 30 Apr 2018 11:06:41 -0400 Subject: [PATCH 027/329] nlp3 --- nlp_class3/attention.py | 460 ++++++++++++++++++++++++++++++++++ nlp_class3/bilstm_mnist.py | 100 ++++++++ nlp_class3/bilstm_test.py | 33 +++ nlp_class3/cnn_toxic.py | 158 ++++++++++++ nlp_class3/extra_reading.txt | 44 ++++ nlp_class3/lstm_toxic.py | 153 +++++++++++ nlp_class3/memory_network.py | 431 +++++++++++++++++++++++++++++++ nlp_class3/poetry.py | 219 ++++++++++++++++ nlp_class3/simple_rnn_test.py | 78 ++++++ nlp_class3/wseq2seq.py | 344 +++++++++++++++++++++++++ 10 files changed, 2020 insertions(+) create mode 100644 nlp_class3/attention.py create mode 100644 nlp_class3/bilstm_mnist.py create mode 100644 nlp_class3/bilstm_test.py create mode 100644 nlp_class3/cnn_toxic.py create mode 100644 nlp_class3/extra_reading.txt create mode 100644 nlp_class3/lstm_toxic.py create mode 100644 nlp_class3/memory_network.py create mode 100644 nlp_class3/poetry.py create mode 100644 nlp_class3/simple_rnn_test.py create mode 100644 nlp_class3/wseq2seq.py diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py new file mode 100644 index 00000000..ae778880 --- /dev/null +++ b/nlp_class3/attention.py @@ -0,0 +1,460 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import os, sys + +from keras.models import Model +from keras.layers import Input, LSTM, GRU, Dense, Embedding, \ + Bidirectional, RepeatVector, Concatenate, Activation, Dot, Lambda +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences +import keras.backend as K + +import numpy as np +import matplotlib.pyplot as plt + + +# make sure we do softmax over the time axis +# expected shape is N x T x D +# note: the latest version of Keras allows you to pass in axis arg +def softmax_over_time(x): + assert(K.ndim(x) > 2) + e = K.exp(x - K.max(x, axis=1, keepdims=True)) + s = K.sum(e, axis=1, keepdims=True) + return e / s + + + +# config +BATCH_SIZE = 64 +EPOCHS = 100 +LATENT_DIM = 256 +LATENT_DIM_DECODER = 256 # idea: make it different to ensure things all fit together properly! 
+NUM_SAMPLES = 10000 +MAX_SEQUENCE_LENGTH = 100 +MAX_NUM_WORDS = 20000 +EMBEDDING_DIM = 100 + + + + +# Where we will store the data +input_texts = [] # sentence in original language +target_texts = [] # sentence in target language +target_texts_inputs = [] # sentence in target language offset by 1 + + +# load in the data +# download the data at: http://www.manythings.org/anki/ +t = 0 +for line in open('../large_files/translation/spa.txt'): + # only keep a limited number of samples + t += 1 + if t > NUM_SAMPLES: + break + + # input and target are separated by tab + if '\t' not in line: + continue + + # split up the input and translation + input_text, translation = line.rstrip().split('\t') + + # make the target input and output + # recall we'll be using teacher forcing + target_text = translation + ' ' + target_text_input = ' ' + translation + + input_texts.append(input_text) + target_texts.append(target_text) + target_texts_inputs.append(target_text_input) +print("num samples:", len(input_texts)) + + + + + + +# tokenize the inputs +tokenizer_inputs = Tokenizer(num_words=MAX_NUM_WORDS) +tokenizer_inputs.fit_on_texts(input_texts) +input_sequences = tokenizer_inputs.texts_to_sequences(input_texts) + +# get the word to index mapping for input language +word2idx_inputs = tokenizer_inputs.word_index +print('Found %s unique input tokens.' % len(word2idx_inputs)) + +# determine maximum length input sequence +max_len_input = max(len(s) for s in input_sequences) + +# tokenize the outputs +# don't filter out special characters +# otherwise and won't appear +tokenizer_outputs = Tokenizer(num_words=MAX_NUM_WORDS, filters='') +tokenizer_outputs.fit_on_texts(target_texts + target_texts_inputs) # inefficient, oh well +target_sequences = tokenizer_outputs.texts_to_sequences(target_texts) +target_sequences_inputs = tokenizer_outputs.texts_to_sequences(target_texts_inputs) + +# get the word to index mapping for output language +word2idx_outputs = tokenizer_outputs.word_index +print('Found %s unique output tokens.' % len(word2idx_outputs)) + +# store number of output words for later +# remember to add 1 since indexing starts at 1 +num_words_output = len(word2idx_outputs) + 1 + +# determine maximum length output sequence +max_len_target = max(len(s) for s in target_sequences) + + + + +# pad the sequences +encoder_inputs = pad_sequences(input_sequences, maxlen=max_len_input) +print("encoder_data.shape:", encoder_inputs.shape) +print("encoder_data[0]:", encoder_inputs[0]) + +decoder_inputs = pad_sequences(target_sequences_inputs, maxlen=max_len_target, padding='post') +print("decoder_data[0]:", decoder_inputs[0]) +print("decoder_data.shape:", decoder_inputs.shape) + +decoder_targets = pad_sequences(target_sequences, maxlen=max_len_target, padding='post') + + + + + + +# store all the pre-trained word vectors +print('Loading word vectors...') +word2vec = {} +with open(os.path.join('../large_files/glove.6B/glove.6B.%sd.txt' % EMBEDDING_DIM)) as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec +print('Found %s word vectors.' 
% len(word2vec)) + + + + +# prepare embedding matrix +print('Filling pre-trained embeddings...') +num_words = min(MAX_NUM_WORDS, len(word2idx_inputs) + 1) +embedding_matrix = np.zeros((num_words, EMBEDDING_DIM)) +for word, i in word2idx_inputs.items(): + if i < MAX_NUM_WORDS: + embedding_vector = word2vec.get(word) + if embedding_vector is not None: + # words not found in embedding index will be all zeros. + embedding_matrix[i] = embedding_vector + + + + +# create embedding layer +embedding_layer = Embedding( + num_words, + EMBEDDING_DIM, + weights=[embedding_matrix], + input_length=max_len_input, + # trainable=True +) + + + + + + +# create targets, since we cannot use sparse +# categorical cross entropy when we have sequences +decoder_targets_one_hot = np.zeros( + ( + len(input_texts), + max_len_target, + num_words_output + ), + dtype='float32' +) + +# assign the values +for i, d in enumerate(decoder_targets): + for t, word in enumerate(d): + decoder_targets_one_hot[i, t, word] = 1 + + + + + + +##### build the model ##### + +# Set up the encoder - simple! +encoder_inputs_placeholder = Input(shape=(max_len_input,)) +x = embedding_layer(encoder_inputs_placeholder) +encoder = Bidirectional(LSTM(LATENT_DIM, return_sequences=True, dropout=0.5)) +encoder_outputs = encoder(x) + + +# Set up the decoder - not so simple +decoder_inputs_placeholder = Input(shape=(max_len_target,)) + +# this word embedding will not use pre-trained vectors +# although you could +decoder_embedding = Embedding(num_words_output, EMBEDDING_DIM) +decoder_inputs_x = decoder_embedding(decoder_inputs_placeholder) + + + + +######### Attention ######### +# Attention layers need to be global because +# they will be repeated Ty times at the decoder +attn_repeat_layer = RepeatVector(max_len_input) +attn_concat_layer = Concatenate(axis=-1) +attn_dense1 = Dense(10, activation='tanh') +attn_dense2 = Dense(1, activation=softmax_over_time) +attn_dot = Dot(axes=1) # to perform the weighted sum of alpha[t] * h[t] + +def one_step_attention(h, st_1): + # h = h(1), ..., h(Tx), shape = (Tx, LATENT_DIM * 2) + # st_1 = s(t-1), shape = (LATENT_DIM_DECODER,) + + # copy s(t-1) Tx times + # now shape = (Tx, LATENT_DIM_DECODER) + st_1 = attn_repeat_layer(st_1) + + # Concatenate all h(t)'s with s(t-1) + # Now of shape (Tx, LATENT_DIM_DECODER + LATENT_DIM * 2) + x = attn_concat_layer([h, st_1]) + + # Neural net first layer + x = attn_dense1(x) + + # Neural net second layer with special softmax over time + alphas = attn_dense2(x) + + # "Dot" the alphas and the h's + # Remember a.dot(b) = sum over a[t] * b[t] + context = attn_dot([alphas, h]) + + return context + + +# define the rest of the decoder (after attention) +decoder_lstm = LSTM(LATENT_DIM_DECODER, return_state=True) +decoder_dense = Dense(num_words_output, activation='softmax') + +initial_s = Input(shape=(LATENT_DIM_DECODER,), name='s0') +initial_c = Input(shape=(LATENT_DIM_DECODER,), name='c0') +context_last_word_concat_layer = Concatenate(axis=2) + + +# Unlike previous seq2seq, we cannot get the output +# all in one step +# Instead we need to do Ty steps +# And in each of those steps, we need to consider +# all Tx h's + +# s, c will be re-assigned in each iteration of the loop +s = initial_s +c = initial_c + +# collect outputs in a list at first +outputs = [] +for t in range(max_len_target): # Ty times + # get the context using attention + context = one_step_attention(encoder_outputs, s) + + # we need a different layer for each time step + selector = Lambda(lambda x: x[:, t:t+1]) + xt = 
selector(decoder_inputs_x) + + # combine + decoder_lstm_input = context_last_word_concat_layer([context, xt]) + + # pass the combined [context, last word] into the LSTM + # along with [s, c] + # get the new [s, c] and output + o, s, c = decoder_lstm(decoder_lstm_input, initial_state=[s, c]) + + # final dense layer to get next word prediction + decoder_outputs = decoder_dense(o) + outputs.append(decoder_outputs) + + +# 'outputs' is now a list of length Ty +# each element is of shape (batch size, output vocab size) +# therefore if we simply stack all the outputs into 1 tensor +# it would be of shape T x N x D +# we would like it to be of shape N x T x D + +def stack_and_transpose(x): + # x is a list of length T, each element is a batch_size x output_vocab_size tensor + x = K.stack(x) # is now T x batch_size x output_vocab_size tensor + x = K.permute_dimensions(x, pattern=(1, 0, 2)) # is now batch_size x T x output_vocab_size + return x + +# make it a layer +stacker = Lambda(stack_and_transpose) +outputs = stacker(outputs) + +# create the model +model = Model( + inputs=[ + encoder_inputs_placeholder, + decoder_inputs_placeholder, + initial_s, + initial_c, + ], + outputs=outputs +) + +# compile the model +model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) + +# train the model +z = np.zeros((NUM_SAMPLES, LATENT_DIM_DECODER)) # initial [s, c] +r = model.fit( + [encoder_inputs, decoder_inputs, z, z], decoder_targets_one_hot, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=0.2 +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + + +##### Make predictions ##### +# As with the poetry example, we need to create another model +# that can take in the RNN state and previous word as input +# and accept a T=1 sequence. + +# The encoder will be stand-alone +# From this we will get our initial decoder hidden state +# i.e. h(1), ..., h(Tx) +encoder_model = Model(encoder_inputs_placeholder, encoder_outputs) + +# next we define a T=1 decoder model +encoder_outputs_as_input = Input(shape=(max_len_input, LATENT_DIM * 2,)) +decoder_inputs_single = Input(shape=(1,)) +decoder_inputs_single_x = decoder_embedding(decoder_inputs_single) + +# no need to loop over attention steps this time because there is only one step +context = one_step_attention(encoder_outputs_as_input, initial_s) + +# combine context with last word +decoder_lstm_input = context_last_word_concat_layer([context, decoder_inputs_single_x]) + +# lstm and final dense +o, s, c = decoder_lstm(decoder_lstm_input, initial_state=[initial_s, initial_c]) +decoder_outputs = decoder_dense(o) + + +# note: we don't really need the final stack and tranpose +# because there's only 1 output +# it is already of size N x D +# no need to make it 1 x N x D --> N x 1 x D + + + +# create the model object +decoder_model = Model( + inputs=[ + decoder_inputs_single, + encoder_outputs_as_input, + initial_s, + initial_c + ], + outputs=[decoder_outputs, s, c] +) + + + +# map indexes back into real words +# so we can view the results +idx2word_eng = {v:k for k, v in word2idx_inputs.items()} +idx2word_trans = {v:k for k, v in word2idx_outputs.items()} + + + + + +def decode_sequence(input_seq): + # Encode the input as state vectors. 
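+  # For this attention model, the "state" is really the full sequence of
+  # encoder hidden states h(1), ..., h(Tx), of shape
+  # (1, max_len_input, 2 * LATENT_DIM) since the encoder is Bidirectional.
+  # The loop below then re-runs one_step_attention over all of them at
+  # every output step, using the current decoder state s.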
+ enc_out = encoder_model.predict(input_seq) + + # Generate empty target sequence of length 1. + target_seq = np.zeros((1, 1)) + + # Populate the first character of target sequence with the start character. + # NOTE: tokenizer lower-cases all words + target_seq[0, 0] = word2idx_outputs[''] + + # if we get this we break + eos = word2idx_outputs[''] + + + # [s, c] will be updated in each loop iteration + s = np.zeros((1, LATENT_DIM_DECODER)) + c = np.zeros((1, LATENT_DIM_DECODER)) + + + # Create the translation + output_sentence = [] + for _ in range(max_len_target): + o, s, c = decoder_model.predict([target_seq, enc_out, s, c]) + + + # Get next word + idx = np.argmax(o.flatten()) + + # End sentence of EOS + if eos == idx: + break + + word = '' + if idx > 0: + word = idx2word_trans[idx] + output_sentence.append(word) + + # Update the decoder input + # which is just the word just generated + target_seq[0, 0] = idx + + return ' '.join(output_sentence) + + + + +while True: + # Do some test translations + i = np.random.choice(len(input_texts)) + input_seq = encoder_inputs[i:i+1] + translation = decode_sequence(input_seq) + print('-') + print('Input sentence:', input_texts[i]) + print('Predicted translation:', translation) + print('Actual translation:', target_texts[i]) + + ans = input("Continue? [Y/n]") + if ans and ans.lower().startswith('n'): + break + diff --git a/nlp_class3/bilstm_mnist.py b/nlp_class3/bilstm_mnist.py new file mode 100644 index 00000000..a17e04dc --- /dev/null +++ b/nlp_class3/bilstm_mnist.py @@ -0,0 +1,100 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +from keras.models import Model +from keras.layers import Input, LSTM, GRU, Bidirectional, GlobalMaxPooling1D, Lambda, Concatenate, Dense +import keras.backend as K +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + + +def get_mnist(limit=None): + if not os.path.exists('../large_files'): + print("You must create a folder called large_files adjacent to the class folder first.") + if not os.path.exists('../large_files/train.csv'): + print("Looks like you haven't downloaded the data or it's not in the right spot.") + print("Please get train.csv from https://www.kaggle.com/c/digit-recognizer") + print("and place it in the large_files folder.") + + print("Reading in and transforming data...") + df = pd.read_csv('../large_files/train.csv') + data = df.as_matrix() + np.random.shuffle(data) + X = data[:, 1:].reshape(-1, 28, 28) / 255.0 # data is from 0..255 + Y = data[:, 0] + if limit is not None: + X, Y = X[:limit], Y[:limit] + return X, Y + + + + +# get data +X, Y = get_mnist() + +# config +D = 28 +M = 15 + + +# input is an image of size 28x28 +input_ = Input(shape=(D, D)) + +# up-down +rnn1 = Bidirectional(LSTM(M, return_sequences=True)) +x1 = rnn1(input_) # output is N x D x 2M +x1 = GlobalMaxPooling1D()(x1) # output is N x 2M + +# left-right +rnn2 = Bidirectional(LSTM(M, return_sequences=True)) + +# custom layer +permutor = Lambda(lambda t: K.permute_dimensions(t, pattern=(0, 2, 1))) + +x2 = permutor(input_) +x2 = rnn2(x2) # output is N x D x 2M +x2 = GlobalMaxPooling1D()(x2) # output is N x 2M + +# put them together +concatenator = Concatenate(axis=1) +x = concatenator([x1, x2]) # output is N x 4M + +# final dense layer +output = Dense(10, activation='softmax')(x) + +model = Model(inputs=input_, 
outputs=output) + +# testing +# o = model.predict(X) +# print("o.shape:", o.shape) + +# compile +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# train +print('Training model...') +r = model.fit(X, Y, batch_size=32, epochs=10, validation_split=0.3) + + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + diff --git a/nlp_class3/bilstm_test.py b/nlp_class3/bilstm_test.py new file mode 100644 index 00000000..b6a1e181 --- /dev/null +++ b/nlp_class3/bilstm_test.py @@ -0,0 +1,33 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Model +from keras.layers import Input, LSTM, GRU, Bidirectional +import numpy as np +import matplotlib.pyplot as plt + + +T = 8 +D = 2 +M = 3 + + +X = np.random.randn(1, T, D) + + +input_ = Input(shape=(T, D)) +# rnn = Bidirectional(LSTM(M, return_state=True, return_sequences=True)) +rnn = Bidirectional(LSTM(M, return_state=True, return_sequences=False)) +x = rnn(input_) + +model = Model(inputs=input_, outputs=x) +o, h1, c1, h2, c2 = model.predict(X) +print("o:", o) +print("o.shape:", o.shape) +print("h1:", h1) +print("c1:", c1) +print("h2:", h2) +print("c2:", c2) \ No newline at end of file diff --git a/nlp_class3/cnn_toxic.py b/nlp_class3/cnn_toxic.py new file mode 100644 index 00000000..1cc5b48a --- /dev/null +++ b/nlp_class3/cnn_toxic.py @@ -0,0 +1,158 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import os +import sys +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences +from keras.layers import Dense, Input, GlobalMaxPooling1D +from keras.layers import Conv1D, MaxPooling1D, Embedding +from keras.models import Model +from sklearn.metrics import roc_auc_score + + +# Download the data: +# https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge +# Download the word vectors: +# http://nlp.stanford.edu/data/glove.6B.zip + + +# some configuration +MAX_SEQUENCE_LENGTH = 100 +MAX_VOCAB_SIZE = 20000 +EMBEDDING_DIM = 100 +VALIDATION_SPLIT = 0.2 +BATCH_SIZE = 128 +EPOCHS = 10 + + + +# load in pre-trained word vectors +print('Loading word vectors...') +word2vec = {} +with open(os.path.join('../large_files/glove.6B/glove.6B.%sd.txt' % EMBEDDING_DIM)) as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec +print('Found %s word vectors.' 
% len(word2vec)) + + + +# prepare text samples and their labels +print('Loading in comments...') + +train = pd.read_csv("../large_files/toxic-comment/train.csv") +sentences = train["comment_text"].fillna("DUMMY_VALUE").values +possible_labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] +targets = train[possible_labels].values + +print("max sequence length:", max(len(s) for s in sentences)) +print("min sequence length:", min(len(s) for s in sentences)) +s = sorted(len(s) for s in sentences) +print("median sequence length:", s[len(s) // 2]) + + + + +# convert the sentences (strings) into integers +tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE) +tokenizer.fit_on_texts(sentences) +sequences = tokenizer.texts_to_sequences(sentences) +# print("sequences:", sequences); exit() + + +# get word -> integer mapping +word2idx = tokenizer.word_index +print('Found %s unique tokens.' % len(word2idx)) + + +# pad sequences so that we get a N x T matrix +data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) +print('Shape of data tensor:', data.shape) + + + +# prepare embedding matrix +print('Filling pre-trained embeddings...') +num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1) +embedding_matrix = np.zeros((num_words, EMBEDDING_DIM)) +for word, i in word2idx.items(): + if i < MAX_VOCAB_SIZE: + embedding_vector = word2vec.get(word) + if embedding_vector is not None: + # words not found in embedding index will be all zeros. + embedding_matrix[i] = embedding_vector + + + +# load pre-trained word embeddings into an Embedding layer +# note that we set trainable = False so as to keep the embeddings fixed +embedding_layer = Embedding( + num_words, + EMBEDDING_DIM, + weights=[embedding_matrix], + input_length=MAX_SEQUENCE_LENGTH, + trainable=False +) + + +print('Building model...') + +# train a 1D convnet with global maxpooling +input_ = Input(shape=(MAX_SEQUENCE_LENGTH,)) +x = embedding_layer(input_) +x = Conv1D(128, 3, activation='relu')(x) +x = MaxPooling1D(3)(x) +x = Conv1D(128, 3, activation='relu')(x) +x = MaxPooling1D(3)(x) +x = Conv1D(128, 3, activation='relu')(x) +x = GlobalMaxPooling1D()(x) +x = Dense(128, activation='relu')(x) +output = Dense(len(possible_labels), activation='sigmoid')(x) + +model = Model(input_, output) +model.compile( + loss='binary_crossentropy', + optimizer='rmsprop', + metrics=['accuracy'] +) + +print('Training model...') +r = model.fit( + data, + targets, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=VALIDATION_SPLIT +) + + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + +# plot the mean AUC over each label +p = model.predict(data) +aucs = [] +for j in range(6): + auc = roc_auc_score(targets[:,j], p[:,j]) + aucs.append(auc) +print(np.mean(aucs)) diff --git a/nlp_class3/extra_reading.txt b/nlp_class3/extra_reading.txt new file mode 100644 index 00000000..b4d23a0c --- /dev/null +++ b/nlp_class3/extra_reading.txt @@ -0,0 +1,44 @@ +https://deeplearningcourses.com/c/deep-learning-advanced-nlp + +Bidirectional Recurrent Neural Networks +https://maxwell.ict.griffith.edu.au/spl/publications/papers/ieeesp97_schuster.pdf + +Translation Modeling with Bidirectional Recurrent Neural Networks +http://emnlp2014.org/papers/pdf/EMNLP2014003.pdf + +Sequence to Sequence Learning with Neural Networks 
+https://arxiv.org/abs/1409.3215 + +A Neural Conversational Model +https://arxiv.org/abs/1506.05869v3 + +Neural Machine Translation by Jointly Learning to Align and Translate (Attention) +https://arxiv.org/abs/1409.0473 + +Feed-Forward Networks with Attention Can Solve Some Long-Term Memory Problems (Simplified Attention) +https://arxiv.org/abs/1512.08756 + +Memory Networks +https://arxiv.org/abs/1410.3916 + +Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks +http://arxiv.org/abs/1502.05698 + +End-To-End Memory Networks +http://arxiv.org/abs/1503.08895 + +Ask Me Anything: Dynamic Memory Networks for Natural Language Processing +https://arxiv.org/abs/1506.07285 + +WaveNet +https://deepmind.com/blog/wavenet-generative-model-raw-audio/ + +Tacotron +https://google.github.io/tacotron/ + +Tacotron 2 +https://research.googleblog.com/2017/12/tacotron-2-generating-human-like-speech.html + +An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling +https://arxiv.org/abs/1803.01271 +(just released March 2018!) \ No newline at end of file diff --git a/nlp_class3/lstm_toxic.py b/nlp_class3/lstm_toxic.py new file mode 100644 index 00000000..c232feea --- /dev/null +++ b/nlp_class3/lstm_toxic.py @@ -0,0 +1,153 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +import sys +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from keras.models import Model +from keras.layers import Dense, Embedding, Input +from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences +from keras.optimizers import Adam +from sklearn.metrics import roc_auc_score + + +# Download the data: +# https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge +# Download the word vectors: +# http://nlp.stanford.edu/data/glove.6B.zip + + +# some configuration +MAX_SEQUENCE_LENGTH = 100 +MAX_VOCAB_SIZE = 20000 +EMBEDDING_DIM = 50 +VALIDATION_SPLIT = 0.2 +BATCH_SIZE = 128 +EPOCHS = 5 + + + +# load in pre-trained word vectors +print('Loading word vectors...') +word2vec = {} +with open(os.path.join('../large_files/glove.6B/glove.6B.%sd.txt' % EMBEDDING_DIM)) as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec +print('Found %s word vectors.' % len(word2vec)) + + + +# prepare text samples and their labels +print('Loading in comments...') + +train = pd.read_csv("../large_files/toxic-comment/train.csv") +sentences = train["comment_text"].fillna("DUMMY_VALUE").values +possible_labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] +targets = train[possible_labels].values + + + + +# convert the sentences (strings) into integers +tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE) +tokenizer.fit_on_texts(sentences) +sequences = tokenizer.texts_to_sequences(sentences) + + + +# get word -> integer mapping +word2idx = tokenizer.word_index +print('Found %s unique tokens.' 
% len(word2idx)) + + +# pad sequences so that we get a N x T matrix +data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) +print('Shape of data tensor:', data.shape) + + + +# prepare embedding matrix +print('Filling pre-trained embeddings...') +num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1) +embedding_matrix = np.zeros((num_words, EMBEDDING_DIM)) +for word, i in word2idx.items(): + if i < MAX_VOCAB_SIZE: + embedding_vector = word2vec.get(word) + if embedding_vector is not None: + # words not found in embedding index will be all zeros. + embedding_matrix[i] = embedding_vector + + + +# load pre-trained word embeddings into an Embedding layer +# note that we set trainable = False so as to keep the embeddings fixed +embedding_layer = Embedding( + num_words, + EMBEDDING_DIM, + weights=[embedding_matrix], + input_length=MAX_SEQUENCE_LENGTH, + trainable=False +) + + + +print('Building model...') + +# create an LSTM network with a single LSTM +input_ = Input(shape=(MAX_SEQUENCE_LENGTH,)) +x = embedding_layer(input_) +# x = LSTM(15, return_sequences=True)(x) +x = Bidirectional(LSTM(15, return_sequences=True))(x) +x = GlobalMaxPool1D()(x) +output = Dense(len(possible_labels), activation="sigmoid")(x) + +model = Model(input_, output) +model.compile( + loss='binary_crossentropy', + optimizer=Adam(lr=0.01), + metrics=['accuracy'] +) + + +print('Training model...') +r = model.fit( + data, + targets, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=VALIDATION_SPLIT +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + +p = model.predict(data) +aucs = [] +for j in range(6): + auc = roc_auc_score(targets[:,j], p[:,j]) + aucs.append(auc) +print(np.mean(aucs)) + diff --git a/nlp_class3/memory_network.py b/nlp_class3/memory_network.py new file mode 100644 index 00000000..a73a3eed --- /dev/null +++ b/nlp_class3/memory_network.py @@ -0,0 +1,431 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input + + +import numpy as np +import keras.backend as K +import matplotlib.pyplot as plt +import re +import tarfile + +from keras.models import Model +from keras.layers import Dense, Embedding, Input, Lambda, Reshape, add, dot, Activation +from keras.preprocessing.sequence import pad_sequences +from keras.optimizers import Adam, RMSprop +from keras.utils.data_utils import get_file + + + +# get the data and open the compressed file using the tarfile library +# https://research.fb.com/downloads/babi/ +path = get_file( + 'babi-tasks-v1-2.tar.gz', + origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz') +tar = tarfile.open(path) + + + +# relevant data in the tar file +# there's lots more data in there, check it out if you want! +challenges = { + # QA1 with 10,000 samples + 'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt', + # QA2 with 10,000 samples + 'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt', +} + + + +def tokenize(sent): + '''Return the tokens of a sentence including punctuation. + + >>> tokenize('Bob dropped the apple. 
Where is the apple?') + ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?'] + ''' + return [x.strip() for x in re.split('(\W+)?', sent) if x.strip()] + + + + +def get_stories(f): + # data will return a list of triples + # each triple contains: + # 1. a story + # 2. a question about the story + # 3. the answer to the question + data = [] + + # use this list to keep track of the story so far + story = [] + + # print a random story, helpful to see the data + printed = False + for line in f: + line = line.decode('utf-8').strip() + + # split the line number from the rest of the line + nid, line = line.split(' ', 1) + + # see if we should begin a new story + if int(nid) == 1: + story = [] + + # this line contains a question and answer if it has a tab + # questionanswer + # it also tells us which line in the story is relevant to the answer + # Note: we actually ignore this fact, since the model will learn + # which lines are important + # Note: the max line number is not the number of lines of the story + # since lines with questions do not contain any story + # one story may contain MULTIPLE questions + if '\t' in line: + q, a, supporting = line.split('\t') + q = tokenize(q) + + # numbering each line is very useful + # it's the equivalent of adding a unique token to the front + # of each sentence + story_so_far = [[str(i)] + s for i, s in enumerate(story) if s] + + # uncomment if you want to see what a story looks like + # if not printed and np.random.rand() < 0.5: + # print("story_so_far:", story_so_far) + # printed = True + data.append((story_so_far, q, a)) + story.append('') + else: + # just add the line to the current story + story.append(tokenize(line)) + return data + + +# recursively flatten a list +def should_flatten(el): + return not isinstance(el, (str, bytes)) + +def flatten(l): + for el in l: + if should_flatten(el): + yield from flatten(el) + else: + yield el + + + + + + +# convert stories from words into lists of word indexes (integers) +# pad each sequence so that they are the same length +# we will need to re-pad the stories later so that each story +# is the same length +def vectorize_stories(data, word2idx, story_maxlen, query_maxlen): + inputs, queries, answers = [], [], [] + for story, query, answer in data: + inputs.append([[word2idx[w] for w in s] for s in story]) + queries.append([word2idx[w] for w in query]) + answers.append([word2idx[answer]]) + return ( + [pad_sequences(x, maxlen=story_maxlen) for x in inputs], + pad_sequences(queries, maxlen=query_maxlen), + np.array(answers) + ) + + + +# this is like 'pad_sequences' but for entire stories +# we are padding each story with zeros so every story +# has the same number of sentences +# append an array of zeros of size: +# (max_sentences - num sentences in story, max words in sentence) +def stack_inputs(inputs, story_maxsents, story_maxlen): + for i, story in enumerate(inputs): + inputs[i] = np.concatenate( + [ + story, + np.zeros((story_maxsents - story.shape[0], story_maxlen), 'int') + ] + ) + return np.stack(inputs) + + + + + +# make a function to get the data since +# we want to load both the single supporting fact data +# and the two supporting fact data later +def get_data(challenge_type): + # input should either be 'single_supporting_fact_10k' or 'two_supporting_facts_10k' + challenge = challenges[challenge_type] + + + # returns a list of triples of: + # (story, question, answer) + # story is a list of sentences + # question is a sentence + # answer is a word + train_stories = 
get_stories(tar.extractfile(challenge.format('train'))) + test_stories = get_stories(tar.extractfile(challenge.format('test'))) + + + # group all the stories together + stories = train_stories + test_stories + + # so we can get the max length of each story, of each sentence, and of each question + story_maxlen = max((len(s) for x, _, _ in stories for s in x)) + story_maxsents = max((len(x) for x, _, _ in stories)) + query_maxlen = max(len(x) for _, x, _ in stories) + + # Create vocabulary of corpus and find size, including a padding element. + vocab = sorted(set(flatten(stories))) + vocab.insert(0, '') + vocab_size = len(vocab) + + # Create an index mapping for the vocabulary. + word2idx = {c:i for i, c in enumerate(vocab)} + + # convert stories from strings to lists of integers + inputs_train, queries_train, answers_train = vectorize_stories( + train_stories, + word2idx, + story_maxlen, + query_maxlen + ) + inputs_test, queries_test, answers_test = vectorize_stories( + test_stories, + word2idx, + story_maxlen, + query_maxlen + ) + + # convert inputs into 3-D numpy arrays + inputs_train = stack_inputs(inputs_train, story_maxsents, story_maxlen) + inputs_test = stack_inputs(inputs_test, story_maxsents, story_maxlen) + print("inputs_train.shape, inputs_test.shape", inputs_train.shape, inputs_test.shape) + + + # return model inputs for keras + return train_stories, test_stories, \ + inputs_train, queries_train, answers_train, \ + inputs_test, queries_test, answers_test, \ + story_maxsents, story_maxlen, query_maxlen, \ + vocab, vocab_size + + +# get the single supporting fact data +train_stories, test_stories, \ + inputs_train, queries_train, answers_train, \ + inputs_test, queries_test, answers_test, \ + story_maxsents, story_maxlen, query_maxlen, \ + vocab, vocab_size = get_data('single_supporting_fact_10k') + + + + +##### create the model ##### +embedding_dim = 15 + + +# turn the story into a sequence of embedding vectors +# one for each story line +# treating each story line like a "bag of words" +input_story_ = Input((story_maxsents, story_maxlen)) +embedded_story = Embedding(vocab_size, embedding_dim)(input_story_) +embedded_story = Lambda(lambda x: K.sum(x, axis=2))(embedded_story) +print("input_story_.shape, embedded_story.shape:", input_story_.shape, embedded_story.shape) + + +# turn the question into an embedding +# also a bag of words +input_question_ = Input((query_maxlen,)) +embedded_question = Embedding(vocab_size, embedding_dim)(input_question_) +embedded_question = Lambda(lambda x: K.sum(x, axis=1))(embedded_question) + +# add a "sequence length" of 1 so that it can +# be dotted with the story later +embedded_question = Reshape((1, embedding_dim))(embedded_question) +print("inp_q.shape, emb_q.shape:", input_question_.shape, embedded_question.shape) + + +# calculate the weights for each story line +# embedded_story.shape = (N, num sentences, embedding_dim) +# embedded_question.shape = (N, 1, embedding_dim) +x = dot([embedded_story, embedded_question], 2) +x = Reshape((story_maxsents,))(x) # flatten the vector +x = Activation('softmax')(x) +story_weights = Reshape((story_maxsents, 1))(x) # unflatten it again to be dotted later +print("story_weights.shape:", story_weights.shape) + + + +x = dot([story_weights, embedded_story], 1) +x = Reshape((embedding_dim,))(x) # flatten it again +ans = Dense(vocab_size, activation='softmax')(x) + +# make the model +model = Model([input_story_, input_question_], ans) + +# compile the model +model.compile( + optimizer=RMSprop(lr=1e-2), + 
loss='sparse_categorical_crossentropy', + metrics=['accuracy'] +) + +# train the model +r = model.fit( + [inputs_train, queries_train], + answers_train, + epochs=4, + batch_size=32, + validation_data=([inputs_test, queries_test], answers_test) +) + + +# Check how we weight each input sentence given a story and question +debug_model = Model([input_story_, input_question_], story_weights) + +# choose a random story +story_idx = np.random.choice(len(train_stories)) + +# get weights from debug model +i = inputs_train[story_idx:story_idx+1] +q = queries_train[story_idx:story_idx+1] +w = debug_model.predict([i, q]).flatten() + +story, question, ans = train_stories[story_idx] +print("story:\n") +for i, line in enumerate(story): + print("{:1.5f}".format(w[i]), "\t", " ".join(line)) + +print("question:", " ".join(question)) +print("answer:", ans) + + +# pause so we can see the output +input("Hit enter to continue\n\n") + + + +##### two supporting facts ##### + + +# get the two supporting fact data +train_stories, test_stories, \ + inputs_train, queries_train, answers_train, \ + inputs_test, queries_test, answers_test, \ + story_maxsents, story_maxlen, query_maxlen, \ + vocab, vocab_size = get_data('two_supporting_facts_10k') + + + +##### create the model ##### +embedding_dim = 30 + + +# make a function for this so we can use it again +def embed_and_sum(x, axis=2): + x = Embedding(vocab_size, embedding_dim)(x) + x = Lambda(lambda x: K.sum(x, axis))(x) + return x + +# define the inputs +input_story_ = Input((story_maxsents, story_maxlen)) +input_question_ = Input((query_maxlen,)) + + +# embed the inputs +embedded_story = embed_and_sum(input_story_) +embedded_question = embed_and_sum(input_question_, 1) + + +# final dense will be used in each hop +dense_layer = Dense(embedding_dim, activation='elu') + + +# define one hop +# the "query" can be the question, or the answer from the previous hop +def hop(query, story): + # query.shape = (embedding_dim,) + # story.shape = (num sentences, embedding_dim) + x = Reshape((1, embedding_dim))(query) # make it (1, embedding_dim) + x = dot([story, x], 2) + x = Reshape((story_maxsents,))(x) # flatten it for softmax + x = Activation('softmax')(x) + story_weights = Reshape((story_maxsents, 1))(x) # unflatten for dotting + + # makes a new embedding + story_embedding2 = embed_and_sum(input_story_) + x = dot([story_weights, story_embedding2], 1) + x = Reshape((embedding_dim,))(x) + x = dense_layer(x) + return x, story_embedding2, story_weights + + +# do the hops +ans1, embedded_story, story_weights1 = hop(embedded_question, embedded_story) +ans2, _, story_weights2 = hop(ans1, embedded_story) + +# get the final answer +ans = Dense(vocab_size, activation='softmax')(ans2) + + +# build the model +model2 = Model([input_story_, input_question_], ans) + +# compile the model +model2.compile( + optimizer=RMSprop(lr=5e-3), + loss='sparse_categorical_crossentropy', + metrics=['accuracy'] +) + +# fit the model +r = model2.fit( + [inputs_train, queries_train], + answers_train, + epochs=30, + batch_size=32, + validation_data=([inputs_test, queries_test], answers_test) +) + + +### print story line weights again ### +debug_model2 = Model( + [input_story_, input_question_], + [story_weights1, story_weights2] +) + +# choose a random story +story_idx = np.random.choice(len(train_stories)) + +# get weights from debug model +i = inputs_train[story_idx:story_idx+1] +q = queries_train[story_idx:story_idx+1] +w1, w2 = debug_model2.predict([i, q]) +w1 = w1.flatten() +w2 = w2.flatten() + +story, 
question, ans = train_stories[story_idx] +print("story:\n") +for j, line in enumerate(story): + print("{:1.5f}".format(w1[j]), "\t", "{:1.5f}".format(w2[j]), "\t", " ".join(line)) + +print("question:", " ".join(question)) +print("answer:", ans) +print("prediction:", vocab[ np.argmax(model2.predict([i, q])[0]) ]) + + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() diff --git a/nlp_class3/poetry.py b/nlp_class3/poetry.py new file mode 100644 index 00000000..123df4b9 --- /dev/null +++ b/nlp_class3/poetry.py @@ -0,0 +1,219 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import os +import sys +import string +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from keras.models import Model +from keras.layers import Dense, Embedding, Input, LSTM +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences +from keras.optimizers import Adam, SGD + + +# some configuration +MAX_SEQUENCE_LENGTH = 100 +MAX_VOCAB_SIZE = 3000 +EMBEDDING_DIM = 50 +VALIDATION_SPLIT = 0.2 +BATCH_SIZE = 128 +EPOCHS = 2000 +LATENT_DIM = 25 + +# load in the data +input_texts = [] +target_texts = [] +for line in open('../hmm_class/robert_frost.txt'): + line = line.rstrip() + if not line: + continue + + input_line = ' ' + line + target_line = line + ' ' + + input_texts.append(input_line) + target_texts.append(target_line) + + +all_lines = input_texts + target_texts + +# convert the sentences (strings) into integers +tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE, filters='') +tokenizer.fit_on_texts(all_lines) +input_sequences = tokenizer.texts_to_sequences(input_texts) +target_sequences = tokenizer.texts_to_sequences(target_texts) + +# find max seq length +max_sequence_length_from_data = max(len(s) for s in input_sequences) +print('Max sequence length:', max_sequence_length_from_data) + + +# get word -> integer mapping +word2idx = tokenizer.word_index +print('Found %s unique tokens.' % len(word2idx)) +assert('' in word2idx) +assert('' in word2idx) + + +# pad sequences so that we get a N x T matrix +max_sequence_length = min(max_sequence_length_from_data, MAX_SEQUENCE_LENGTH) +input_sequences = pad_sequences(input_sequences, maxlen=max_sequence_length, padding='post') +target_sequences = pad_sequences(target_sequences, maxlen=max_sequence_length, padding='post') +print('Shape of data tensor:', input_sequences.shape) + + + +# load in pre-trained word vectors +print('Loading word vectors...') +word2vec = {} +with open(os.path.join('../large_files/glove.6B/glove.6B.%sd.txt' % EMBEDDING_DIM)) as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec +print('Found %s word vectors.' 
% len(word2vec)) + + + +# prepare embedding matrix +print('Filling pre-trained embeddings...') +num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1) +embedding_matrix = np.zeros((num_words, EMBEDDING_DIM)) +for word, i in word2idx.items(): + if i < MAX_VOCAB_SIZE: + embedding_vector = word2vec.get(word) + if embedding_vector is not None: + # words not found in embedding index will be all zeros. + embedding_matrix[i] = embedding_vector + + + +# one-hot the targets (can't use sparse cross-entropy) +one_hot_targets = np.zeros((len(input_sequences), max_sequence_length, num_words)) +for i, target_sequence in enumerate(target_sequences): + for t, word in enumerate(target_sequence): + if word > 0: + one_hot_targets[i, t, word] = 1 + + + +# load pre-trained word embeddings into an Embedding layer +embedding_layer = Embedding( + num_words, + EMBEDDING_DIM, + weights=[embedding_matrix], + # trainable=False +) + + + +print('Building model...') + +# create an LSTM network with a single LSTM +input_ = Input(shape=(max_sequence_length,)) +initial_h = Input(shape=(LATENT_DIM,)) +initial_c = Input(shape=(LATENT_DIM,)) +x = embedding_layer(input_) +lstm = LSTM(LATENT_DIM, return_sequences=True, return_state=True) +x, _, _ = lstm(x, initial_state=[initial_h, initial_c]) # don't need the states here +dense = Dense(num_words, activation='softmax') +output = dense(x) + +model = Model([input_, initial_h, initial_c], output) +model.compile( + loss='categorical_crossentropy', + # optimizer='rmsprop', + optimizer=Adam(lr=0.01), + # optimizer=SGD(lr=0.01, momentum=0.9), + metrics=['accuracy'] +) + +print('Training model...') +z = np.zeros((len(input_sequences), LATENT_DIM)) +r = model.fit( + [input_sequences, z, z], + one_hot_targets, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=VALIDATION_SPLIT +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + + +# make a sampling model +input2 = Input(shape=(1,)) # we'll only input one word at a time +x = embedding_layer(input2) +x, h, c = lstm(x, initial_state=[initial_h, initial_c]) # now we need states to feed back in +output2 = dense(x) +sampling_model = Model([input2, initial_h, initial_c], [output2, h, c]) + + +# reverse word2idx dictionary to get back words +# during prediction +idx2word = {v:k for k, v in word2idx.items()} + + +def sample_line(): + # initial inputs + np_input = np.array([[ word2idx[''] ]]) + h = np.zeros((1, LATENT_DIM)) + c = np.zeros((1, LATENT_DIM)) + + # so we know when to quit + eos = word2idx[''] + + # store the output here + output_sentence = [] + + for _ in range(max_sequence_length): + o, h, c = sampling_model.predict([np_input, h, c]) + + # print("o.shape:", o.shape, o[0,0,:10]) + # idx = np.argmax(o[0,0]) + probs = o[0,0] + if np.argmax(probs) == 0: + print("wtf") + probs[0] = 0 + probs /= probs.sum() + idx = np.random.choice(len(probs), p=probs) + if idx == eos: + break + + # accuulate output + output_sentence.append(idx2word.get(idx, '' % idx)) + + # make the next input into model + np_input[0,0] = idx + + return ' '.join(output_sentence) + +# generate a 4 line poem +while True: + for _ in range(4): + print(sample_line()) + + ans = input("---generate another? 
[Y/n]---") + if ans and ans[0].lower().startswith('n'): + break diff --git a/nlp_class3/simple_rnn_test.py b/nlp_class3/simple_rnn_test.py new file mode 100644 index 00000000..d7e13a07 --- /dev/null +++ b/nlp_class3/simple_rnn_test.py @@ -0,0 +1,78 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Model +from keras.layers import Input, LSTM, GRU +import numpy as np +import matplotlib.pyplot as plt + + +T = 8 +D = 2 +M = 3 + + +X = np.random.randn(1, T, D) + + +def lstm1(): + input_ = Input(shape=(T, D)) + rnn = LSTM(M, return_state=True) + x = rnn(input_) + + model = Model(inputs=input_, outputs=x) + o, h, c = model.predict(X) + print("o:", o) + print("h:", h) + print("c:", c) + + +def lstm2(): + input_ = Input(shape=(T, D)) + rnn = LSTM(M, return_state=True, return_sequences=True) + # rnn = GRU(M, return_state=True) + x = rnn(input_) + + model = Model(inputs=input_, outputs=x) + o, h, c = model.predict(X) + print("o:", o) + print("h:", h) + print("c:", c) + + +def gru1(): + input_ = Input(shape=(T, D)) + rnn = GRU(M, return_state=True) + x = rnn(input_) + + model = Model(inputs=input_, outputs=x) + o, h = model.predict(X) + print("o:", o) + print("h:", h) + + +def gru2(): + input_ = Input(shape=(T, D)) + rnn = GRU(M, return_state=True, return_sequences=True) + x = rnn(input_) + + model = Model(inputs=input_, outputs=x) + o, h = model.predict(X) + print("o:", o) + print("h:", h) + + + +print("lstm1:") +lstm1() +print("lstm2:") +lstm2() +print("gru1:") +gru1() +print("gru2:") +gru2() + + diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py new file mode 100644 index 00000000..7f6c8a9f --- /dev/null +++ b/nlp_class3/wseq2seq.py @@ -0,0 +1,344 @@ +# # https://deeplearningcourses.com/c/deep-learning-advanced-nlp +# get the data at: http://www.manythings.org/anki/ +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import os, sys + +from keras.models import Model +from keras.layers import Input, LSTM, GRU, Dense, Embedding +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences +from keras.utils import to_categorical +import numpy as np +import matplotlib.pyplot as plt + + +# some config +BATCH_SIZE = 64 # Batch size for training. +EPOCHS = 100 # Number of epochs to train for. +LATENT_DIM = 256 # Latent dimensionality of the encoding space. +NUM_SAMPLES = 10000 # Number of samples to train on. 
+MAX_SEQUENCE_LENGTH = 100 +MAX_NUM_WORDS = 20000 +EMBEDDING_DIM = 100 + +# Where we will store the data +input_texts = [] # sentence in original language +target_texts = [] # sentence in target language +target_texts_inputs = [] # sentence in target language offset by 1 + + +# load in the data +# download the data at: http://www.manythings.org/anki/ +t = 0 +for line in open('../large_files/translation/spa.txt'): + # only keep a limited number of samples + t += 1 + if t > NUM_SAMPLES: + break + + # input and target are separated by tab + if '\t' not in line: + continue + + # split up the input and translation + input_text, translation = line.rstrip().split('\t') + + # make the target input and output + # recall we'll be using teacher forcing + target_text = translation + ' ' + target_text_input = ' ' + translation + + input_texts.append(input_text) + target_texts.append(target_text) + target_texts_inputs.append(target_text_input) +print("num samples:", len(input_texts)) + + +# tokenize the inputs +tokenizer_inputs = Tokenizer(num_words=MAX_NUM_WORDS) +tokenizer_inputs.fit_on_texts(input_texts) +input_sequences = tokenizer_inputs.texts_to_sequences(input_texts) + +# get the word to index mapping for input language +word2idx_inputs = tokenizer_inputs.word_index +print('Found %s unique input tokens.' % len(word2idx_inputs)) + +# determine maximum length input sequence +max_len_input = max(len(s) for s in input_sequences) + +# tokenize the outputs +# don't filter out special characters +# otherwise and won't appear +tokenizer_outputs = Tokenizer(num_words=MAX_NUM_WORDS, filters='') +tokenizer_outputs.fit_on_texts(target_texts + target_texts_inputs) # inefficient, oh well +target_sequences = tokenizer_outputs.texts_to_sequences(target_texts) +target_sequences_inputs = tokenizer_outputs.texts_to_sequences(target_texts_inputs) + +# get the word to index mapping for output language +word2idx_outputs = tokenizer_outputs.word_index +print('Found %s unique output tokens.' % len(word2idx_outputs)) + +# store number of output words for later +# remember to add 1 since indexing starts at 1 +num_words_output = len(word2idx_outputs) + 1 + +# determine maximum length output sequence +max_len_target = max(len(s) for s in target_sequences) + + +# pad the sequences +encoder_inputs = pad_sequences(input_sequences, maxlen=max_len_input) +print("encoder_inputs.shape:", encoder_inputs.shape) +print("encoder_inputs[0]:", encoder_inputs[0]) + +decoder_inputs = pad_sequences(target_sequences_inputs, maxlen=max_len_target, padding='post') +print("decoder_inputs[0]:", decoder_inputs[0]) +print("decoder_inputs.shape:", decoder_inputs.shape) + +decoder_targets = pad_sequences(target_sequences, maxlen=max_len_target, padding='post') + + + + + + + +# store all the pre-trained word vectors +print('Loading word vectors...') +word2vec = {} +with open(os.path.join('../large_files/glove.6B/glove.6B.%sd.txt' % EMBEDDING_DIM)) as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec +print('Found %s word vectors.' 
% len(word2vec)) + + + + +# prepare embedding matrix +print('Filling pre-trained embeddings...') +num_words = min(MAX_NUM_WORDS, len(word2idx_inputs) + 1) +embedding_matrix = np.zeros((num_words, EMBEDDING_DIM)) +for word, i in word2idx_inputs.items(): + if i < MAX_NUM_WORDS: + embedding_vector = word2vec.get(word) + if embedding_vector is not None: + # words not found in embedding index will be all zeros. + embedding_matrix[i] = embedding_vector + + + + +# create embedding layer +embedding_layer = Embedding( + num_words, + EMBEDDING_DIM, + weights=[embedding_matrix], + input_length=max_len_input, + # trainable=True +) + + +# create targets, since we cannot use sparse +# categorical cross entropy when we have sequences +decoder_targets_one_hot = np.zeros( + ( + len(input_texts), + max_len_target, + num_words_output + ), + dtype='float32' +) + +# assign the values +for i, d in enumerate(decoder_targets): + for t, word in enumerate(d): + decoder_targets_one_hot[i, t, word] = 1 + + + + +##### build the model ##### +encoder_inputs_placeholder = Input(shape=(max_len_input,)) +x = embedding_layer(encoder_inputs_placeholder) +encoder = LSTM(LATENT_DIM, return_state=True, dropout=0.5) +encoder_outputs, h, c = encoder(x) +# encoder_outputs, h = encoder(x) #gru + +# keep only the states to pass into decoder +encoder_states = [h, c] +# encoder_states = [state_h] # gru + +# Set up the decoder, using [h, c] as initial state. +decoder_inputs_placeholder = Input(shape=(max_len_target,)) + +# this word embedding will not use pre-trained vectors +# although you could +decoder_embedding = Embedding(num_words_output, LATENT_DIM) +decoder_inputs_x = decoder_embedding(decoder_inputs_placeholder) + +# since the decoder is a "to-many" model we want to have +# return_sequences=True +decoder_lstm = LSTM(LATENT_DIM, return_sequences=True, return_state=True, dropout=0.5) +decoder_outputs, _, _ = decoder_lstm( + decoder_inputs_x, + initial_state=encoder_states +) + +# decoder_outputs, _ = decoder_gru( +# decoder_inputs_x, +# initial_state=encoder_states +# ) + +# final dense layer for predictions +decoder_dense = Dense(num_words_output, activation='softmax') +decoder_outputs = decoder_dense(decoder_outputs) + +# Create the model object +model = Model([encoder_inputs_placeholder, decoder_inputs_placeholder], decoder_outputs) + +# Compile the model and train it +model.compile( + optimizer='rmsprop', + loss='categorical_crossentropy', + metrics=['accuracy'] +) +r = model.fit( + [encoder_inputs, decoder_inputs], decoder_targets_one_hot, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=0.2, +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + +# Save model +model.save('s2s.h5') + + + + +##### Make predictions ##### +# As with the poetry example, we need to create another model +# that can take in the RNN state and previous word as input +# and accept a T=1 sequence. 
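+# Why a separate model: the training model above consumed the whole target
+# sequence at once (teacher forcing), but at translation time we don't have
+# the target, so we must generate one word per step, feeding each prediction
+# and the LSTM states [h, c] back in as the next input.
+#
+# The sampling models below reuse the same layer objects
+# (decoder_embedding, decoder_lstm, decoder_dense), so they share the weights
+# that were just trained. A minimal sketch of that Keras behaviour, with
+# made-up names that are not part of this script:
+#
+#   from keras.layers import Input, Dense
+#   from keras.models import Model
+#   inp_a, inp_b = Input(shape=(4,)), Input(shape=(4,))
+#   shared = Dense(3)
+#   model_a = Model(inp_a, shared(inp_a))
+#   model_b = Model(inp_b, shared(inp_b))
+#   # training model_a also changes model_b's predictions,
+#   # because `shared` has a single set of weights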
+ +# The encoder will be stand-alone +# From this we will get our initial decoder hidden state +encoder_model = Model(encoder_inputs_placeholder, encoder_states) + +decoder_state_input_h = Input(shape=(LATENT_DIM,)) +decoder_state_input_c = Input(shape=(LATENT_DIM,)) +decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] +# decoder_states_inputs = [decoder_state_input_h] # gru + +decoder_inputs_single = Input(shape=(1,)) +decoder_inputs_single_x = decoder_embedding(decoder_inputs_single) + +# this time, we want to keep the states too, to be output +# by our sampling model +decoder_outputs, h, c = decoder_lstm( + decoder_inputs_single_x, + initial_state=decoder_states_inputs +) +# decoder_outputs, state_h = decoder_lstm( +# decoder_inputs_single_x, +# initial_state=decoder_states_inputs +# ) #gru +decoder_states = [h, c] +# decoder_states = [h] # gru +decoder_outputs = decoder_dense(decoder_outputs) + +# The sampling model +# inputs: y(t-1), h(t-1), c(t-1) +# outputs: y(t), h(t), c(t) +decoder_model = Model( + [decoder_inputs_single] + decoder_states_inputs, + [decoder_outputs] + decoder_states +) + +# map indexes back into real words +# so we can view the results +idx2word_eng = {v:k for k, v in word2idx_inputs.items()} +idx2word_trans = {v:k for k, v in word2idx_outputs.items()} + + +def decode_sequence(input_seq): + # Encode the input as state vectors. + states_value = encoder_model.predict(input_seq) + + # Generate empty target sequence of length 1. + target_seq = np.zeros((1, 1)) + + # Populate the first character of target sequence with the start character. + # NOTE: tokenizer lower-cases all words + target_seq[0, 0] = word2idx_outputs[''] + + # if we get this we break + eos = word2idx_outputs[''] + + # Create the translation + output_sentence = [] + for _ in range(max_len_target): + output_tokens, h, c = decoder_model.predict( + [target_seq] + states_value + ) + # output_tokens, h = decoder_model.predict( + # [target_seq] + states_value + # ) # gru + + # Get next word + idx = np.argmax(output_tokens[0, 0, :]) + + # End sentence of EOS + if eos == idx: + break + + word = '' + if idx > 0: + word = idx2word_trans[idx] + output_sentence.append(word) + + # Update the decoder input + # which is just the word just generated + target_seq[0, 0] = idx + + # Update states + states_value = [h, c] + # states_value = [h] # gru + + return ' '.join(output_sentence) + + + +while True: + # Do some test translations + i = np.random.choice(len(input_texts)) + input_seq = encoder_inputs[i:i+1] + translation = decode_sequence(input_seq) + print('-') + print('Input:', input_texts[i]) + print('Translation:', translation) + + ans = input("Continue? 
[Y/n]") + if ans and ans.lower().startswith('n'): + break + From 6f468330663cffdd3debf694b7d5f9c2cfc8a08a Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 1 May 2018 00:55:32 -0400 Subject: [PATCH 028/329] add url --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9777530f..ff892440 100644 --- a/README.md +++ b/README.md @@ -66,4 +66,7 @@ Deep Learning: GANs and Variational Autoencoders https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders Deep Learning: Advanced Computer Vision -https://deeplearningcourses.com/c/advanced-computer-vision \ No newline at end of file +https://deeplearningcourses.com/c/advanced-computer-vision + +Deep Learning: Advanced NLP and RNNs +https://deeplearningcourses.com/c/deep-learning-advanced-nlp \ No newline at end of file From ec5a9245eb0edec07dab672ee75b704123d97f90 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 6 May 2018 21:37:36 -0400 Subject: [PATCH 029/329] pca impl --- unsupervised_class2/pca_impl.py | 43 +++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 unsupervised_class2/pca_impl.py diff --git a/unsupervised_class2/pca_impl.py b/unsupervised_class2/pca_impl.py new file mode 100644 index 00000000..3bbedd84 --- /dev/null +++ b/unsupervised_class2/pca_impl.py @@ -0,0 +1,43 @@ +# https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python +# https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from util import getKaggleMNIST + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# decompose covariance +covX = np.cov(Xtrain.T) +lambdas, Q = np.linalg.eigh(covX) + + +# lambdas are sorted from smallest --> largest +# some may be slightly negative due to precision +idx = np.argsort(-lambdas) +lambdas = lambdas[idx] # sort in proper order +lambdas = np.maximum(lambdas, 0) # get rid of negatives +Q = Q[:,idx] + + +# plot the first 2 columns of Z +Z = Xtrain.dot(Q) +plt.scatter(Z[:,0], Z[:,1], s=100, c=Ytrain, alpha=0.3) +plt.show() + + +# plot variances +plt.plot(lambdas) +plt.title("Variance of each component") +plt.show() + +# cumulative variance +plt.plot(np.cumsum(lambdas)) +plt.title("Cumulative variance") +plt.show() \ No newline at end of file From 3f301f62313714f9dce6ba2fa819bd5899583a99 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 11 May 2018 16:29:28 -0400 Subject: [PATCH 030/329] improve plot --- unsupervised_class3/visualize_latent_space.py | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/unsupervised_class3/visualize_latent_space.py b/unsupervised_class3/visualize_latent_space.py index b95bd3fb..4693d83a 100644 --- a/unsupervised_class3/visualize_latent_space.py +++ b/unsupervised_class3/visualize_latent_space.py @@ -17,11 +17,23 @@ # convert X to binary variable X = (X > 0.5).astype(np.float32) + for i in range(len(X)): + plt.imshow(X[i].reshape(28, 28), cmap='gray') + plt.title("Label: %s" % Y[i]) + plt.show() + ans = input("Show another? 
[Y/n]") + if ans and ans[0].lower().startswith('n'): + break + + vae = VariationalAutoencoder(784, [200, 100, 2]) - vae.fit(X) + vae.fit(X.copy()) + # fit will shuffle the data + # so we need to copy to prevent messing up the order + # for plotting later, we need Z and Y to correspond Z = vae.transform(X) - plt.scatter(Z[:,0], Z[:,1], c=Y) + plt.scatter(Z[:,0], Z[:,1], c=Y, s=10) plt.show() @@ -34,12 +46,12 @@ # build Z first so we don't have to keep # re-calling the predict function # it is particularly slow in theano - Z = [] + Z2 = [] for i, x in enumerate(x_values): for j, y in enumerate(y_values): z = [x, y] - Z.append(z) - X_recon = vae.prior_predictive_with_input(Z) + Z2.append(z) + X_recon = vae.prior_predictive_with_input(Z2) k = 0 for i, x in enumerate(x_values): @@ -51,3 +63,4 @@ image[(n - i - 1) * 28:(n - i) * 28, j * 28:(j + 1) * 28] = x_recon plt.imshow(image, cmap='gray') plt.show() + From f750e73b55691f1f5f509c301784fb3928535475 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 15 May 2018 16:15:46 -0400 Subject: [PATCH 031/329] remove P --- hmm_class/hmmd_tf.py | 3 +-- hmm_class/hmmd_theano2.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/hmm_class/hmmd_tf.py b/hmm_class/hmmd_tf.py index da05a823..d3ecfc2b 100644 --- a/hmm_class/hmmd_tf.py +++ b/hmm_class/hmmd_tf.py @@ -51,8 +51,7 @@ def log_likelihood(self, x): return -self.session.run(self.cost, feed_dict={self.tfx: x}) def get_cost_multi(self, X): - P = np.random.random(len(X)) - return np.array([self.get_cost(x) for x, p in zip(X, P)]) + return np.array([self.get_cost(x) for x in X]) def build(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB): M, V = preSoftmaxB.shape diff --git a/hmm_class/hmmd_theano2.py b/hmm_class/hmmd_theano2.py index f4aef5c4..f652526c 100644 --- a/hmm_class/hmmd_theano2.py +++ b/hmm_class/hmmd_theano2.py @@ -87,8 +87,7 @@ def log_likelihood(self, x): return -self.cost_op(x) def get_cost_multi(self, X): - P = np.random.random(len(X)) - return np.array([self.get_cost(x) for x, p in zip(X, P)]) + return np.array([self.get_cost(x) for x in X]) def set(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB): self.preSoftmaxPi = theano.shared(preSoftmaxPi) From 19dc344339e4ac57d764e954fed57110947591b9 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 24 May 2018 03:39:45 -0400 Subject: [PATCH 032/329] add pos tensorflow --- nlp_class2/pos_tf.py | 252 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 252 insertions(+) create mode 100644 nlp_class2/pos_tf.py diff --git a/nlp_class2/pos_tf.py b/nlp_class2/pos_tf.py new file mode 100644 index 00000000..d043962a --- /dev/null +++ b/nlp_class2/pos_tf.py @@ -0,0 +1,252 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import matplotlib.pyplot as plt +import tensorflow as tf +import os +import sys +sys.path.append(os.path.abspath('..')) +from pos_baseline import get_data +from sklearn.utils import shuffle +from util import init_weight +from datetime import datetime +from sklearn.metrics import f1_score + +from tensorflow.contrib.rnn import static_rnn as get_rnn_output +from tensorflow.contrib.rnn import BasicRNNCell, GRUCell + + + +def get_data(split_sequences=False): + if not os.path.exists('chunking'): + print("Please 
create a folder in your local directory called 'chunking'") + print("train.txt and test.txt should be stored in there.") + print("Please check the comments to get the download link.") + exit() + elif not os.path.exists('chunking/train.txt'): + print("train.txt is not in chunking/train.txt") + print("Please check the comments to get the download link.") + exit() + elif not os.path.exists('chunking/test.txt'): + print("test.txt is not in chunking/test.txt") + print("Please check the comments to get the download link.") + exit() + + word2idx = {} + tag2idx = {} + word_idx = 1 + tag_idx = 1 + Xtrain = [] + Ytrain = [] + currentX = [] + currentY = [] + for line in open('chunking/train.txt'): + line = line.rstrip() + if line: + r = line.split() + word, tag, _ = r + if word not in word2idx: + word2idx[word] = word_idx + word_idx += 1 + currentX.append(word2idx[word]) + + if tag not in tag2idx: + tag2idx[tag] = tag_idx + tag_idx += 1 + currentY.append(tag2idx[tag]) + elif split_sequences: + Xtrain.append(currentX) + Ytrain.append(currentY) + currentX = [] + currentY = [] + + if not split_sequences: + Xtrain = currentX + Ytrain = currentY + + # load and score test data + Xtest = [] + Ytest = [] + currentX = [] + currentY = [] + for line in open('chunking/test.txt'): + line = line.rstrip() + if line: + r = line.split() + word, tag, _ = r + if word in word2idx: + currentX.append(word2idx[word]) + else: + currentX.append(word_idx) # use this as unknown + currentY.append(tag2idx[tag]) + elif split_sequences: + Xtest.append(currentX) + Ytest.append(currentY) + currentX = [] + currentY = [] + if not split_sequences: + Xtest = currentX + Ytest = currentY + + return Xtrain, Ytrain, Xtest, Ytest, word2idx + + +def flatten(l): + return [item for sublist in l for item in sublist] + + + +# get the data +Xtrain, Ytrain, Xtest, Ytest, word2idx = get_data(split_sequences=True) +V = len(word2idx) + 2 # vocab size (+1 for unknown, +1 b/c start from 1) +K = len(set(flatten(Ytrain)) | set(flatten(Ytest))) + 1 # num classes + + +# training config +epochs = 20 +learning_rate = 1e-4 +mu = 0.99 +batch_size = 32 +hidden_layer_size = 10 +embedding_dim = 10 +sequence_length = max(len(x) for x in Xtrain + Xtest) + + + +# pad sequences +Xtrain = tf.keras.preprocessing.sequence.pad_sequences(Xtrain, maxlen=sequence_length) +Ytrain = tf.keras.preprocessing.sequence.pad_sequences(Ytrain, maxlen=sequence_length) +Xtest = tf.keras.preprocessing.sequence.pad_sequences(Xtest, maxlen=sequence_length) +Ytest = tf.keras.preprocessing.sequence.pad_sequences(Ytest, maxlen=sequence_length) +print("Xtrain.shape:", Xtrain.shape) +print("Ytrain.shape:", Ytrain.shape) + + + +# inputs +inputs = tf.placeholder(tf.int32, shape=(None, sequence_length)) +targets = tf.placeholder(tf.int32, shape=(None, sequence_length)) +num_samples = tf.shape(inputs)[0] # useful for later + +# embedding +We = np.random.randn(V, embedding_dim).astype(np.float32) + +# output layer +Wo = init_weight(hidden_layer_size, K).astype(np.float32) +bo = np.zeros(K).astype(np.float32) + +# make them tensorflow variables +tfWe = tf.Variable(We) +tfWo = tf.Variable(Wo) +tfbo = tf.Variable(bo) + +# make the rnn unit +rnn_unit = GRUCell(num_units=hidden_layer_size, activation=tf.nn.relu) + + +# get the output +x = tf.nn.embedding_lookup(tfWe, inputs) + +# converts x from a tensor of shape N x T x D +# into a list of length T, where each element is a tensor of shape N x D +x = tf.unstack(x, sequence_length, 1) + +# get the rnn output +outputs, states = get_rnn_output(rnn_unit, x, 
dtype=tf.float32) + + +# outputs are now of size (T, N, M) +# so make it (N, T, M) +outputs = tf.transpose(outputs, (1, 0, 2)) +outputs = tf.reshape(outputs, (sequence_length*num_samples, hidden_layer_size)) # NT x M + +# Linear activation, using rnn inner loop last output +logits = tf.matmul(outputs, tfWo) + tfbo # NT x K +predictions = tf.argmax(logits, 1) +predict_op = tf.reshape(predictions, (num_samples, sequence_length)) +labels_flat = tf.reshape(targets, [-1]) + +cost_op = tf.reduce_mean( + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, + labels=labels_flat + ) +) +train_op = tf.train.AdamOptimizer(1e-2).minimize(cost_op) + + + + +# init stuff +sess = tf.InteractiveSession() +init = tf.global_variables_initializer() +sess.run(init) + + +# training loop +costs = [] +n_batches = len(Ytrain) // batch_size +for i in range(epochs): + n_total = 0 + n_correct = 0 + + t0 = datetime.now() + Xtrain, Ytrain = shuffle(Xtrain, Ytrain) + cost = 0 + + for j in range(n_batches): + x = Xtrain[j*batch_size:(j+1)*batch_size] + y = Ytrain[j*batch_size:(j+1)*batch_size] + + # get the cost, predictions, and perform a gradient descent step + c, p, _ = sess.run( + (cost_op, predict_op, train_op), + feed_dict={inputs: x, targets: y}) + cost += c + + # calculate the accuracy + for yi, pi in zip(y, p): + # we don't care about the padded entries so ignore them + yii = yi[yi > 0] + pii = pi[yi > 0] + n_correct += np.sum(yii == pii) + n_total += len(yii) + + # print stuff out periodically + if j % 10 == 0: + sys.stdout.write( + "j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % + (j, n_batches, float(n_correct)/n_total, cost) + ) + sys.stdout.flush() + + # get test acc. too + p = sess.run(predict_op, feed_dict={inputs: Xtest, targets: Ytest}) + n_test_correct = 0 + n_test_total = 0 + for yi, pi in zip(Ytest, p): + yii = yi[yi > 0] + pii = pi[yi > 0] + n_test_correct += np.sum(yii == pii) + n_test_total += len(yii) + test_acc = float(n_test_correct) / n_test_total + + print( + "i:", i, "cost:", "%.4f" % cost, + "train acc:", "%.4f" % (float(n_correct)/n_total), + "test acc:", "%.4f" % test_acc, + "time for epoch:", (datetime.now() - t0) + ) + costs.append(cost) + +plt.plot(costs) +plt.show() + + From 93d36ba444840630baac0f496d1d1a27483dedc6 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 24 May 2018 13:06:24 -0400 Subject: [PATCH 033/329] more tf --- nlp_class2/ner_tf.py | 224 ++++++++++++++++++++++++++++++++++++ nlp_class2/pos_ner_keras.py | 224 ++++++++++++++++++++++++++++++++++++ nlp_class2/pos_tf.py | 4 +- 3 files changed, 450 insertions(+), 2 deletions(-) create mode 100644 nlp_class2/ner_tf.py create mode 100644 nlp_class2/pos_ner_keras.py diff --git a/nlp_class2/ner_tf.py b/nlp_class2/ner_tf.py new file mode 100644 index 00000000..7f8fa2c1 --- /dev/null +++ b/nlp_class2/ner_tf.py @@ -0,0 +1,224 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import matplotlib.pyplot as plt +import tensorflow as tf +import os +import sys +sys.path.append(os.path.abspath('..')) +from pos_baseline import get_data +from sklearn.utils import shuffle +from util import init_weight +from datetime import datetime +from sklearn.metrics import f1_score + +from tensorflow.contrib.rnn 
import static_rnn as get_rnn_output +from tensorflow.contrib.rnn import BasicRNNCell, GRUCell + + + +def get_data(split_sequences=False): + word2idx = {} + tag2idx = {} + word_idx = 1 + tag_idx = 1 + Xtrain = [] + Ytrain = [] + currentX = [] + currentY = [] + for line in open('ner.txt'): + line = line.rstrip() + if line: + r = line.split() + word, tag = r + word = word.lower() + if word not in word2idx: + word2idx[word] = word_idx + word_idx += 1 + currentX.append(word2idx[word]) + + if tag not in tag2idx: + tag2idx[tag] = tag_idx + tag_idx += 1 + currentY.append(tag2idx[tag]) + elif split_sequences: + Xtrain.append(currentX) + Ytrain.append(currentY) + currentX = [] + currentY = [] + + if not split_sequences: + Xtrain = currentX + Ytrain = currentY + + print("number of samples:", len(Xtrain)) + Xtrain, Ytrain = shuffle(Xtrain, Ytrain) + Ntest = int(0.3*len(Xtrain)) + Xtest = Xtrain[:Ntest] + Ytest = Ytrain[:Ntest] + Xtrain = Xtrain[Ntest:] + Ytrain = Ytrain[Ntest:] + print("number of classes:", len(tag2idx)) + return Xtrain, Ytrain, Xtest, Ytest, word2idx, tag2idx + + + +def flatten(l): + return [item for sublist in l for item in sublist] + + + +# get the data +Xtrain, Ytrain, Xtest, Ytest, word2idx, tag2idx = get_data(split_sequences=True) +V = len(word2idx) + 2 # vocab size (+1 for unknown, +1 for pad) +K = len(set(flatten(Ytrain)) | set(flatten(Ytest))) + 1 # num classes + + +# training config +epochs = 5 +learning_rate = 1e-2 +mu = 0.99 +batch_size = 32 +hidden_layer_size = 10 +embedding_dim = 10 +sequence_length = max(len(x) for x in Xtrain + Xtest) + + + +# pad sequences +Xtrain = tf.keras.preprocessing.sequence.pad_sequences(Xtrain, maxlen=sequence_length) +Ytrain = tf.keras.preprocessing.sequence.pad_sequences(Ytrain, maxlen=sequence_length) +Xtest = tf.keras.preprocessing.sequence.pad_sequences(Xtest, maxlen=sequence_length) +Ytest = tf.keras.preprocessing.sequence.pad_sequences(Ytest, maxlen=sequence_length) +print("Xtrain.shape:", Xtrain.shape) +print("Ytrain.shape:", Ytrain.shape) + + + +# inputs +inputs = tf.placeholder(tf.int32, shape=(None, sequence_length)) +targets = tf.placeholder(tf.int32, shape=(None, sequence_length)) +num_samples = tf.shape(inputs)[0] # useful for later + +# embedding +We = np.random.randn(V, embedding_dim).astype(np.float32) + +# output layer +Wo = init_weight(hidden_layer_size, K).astype(np.float32) +bo = np.zeros(K).astype(np.float32) + +# make them tensorflow variables +tfWe = tf.Variable(We) +tfWo = tf.Variable(Wo) +tfbo = tf.Variable(bo) + +# make the rnn unit +rnn_unit = GRUCell(num_units=hidden_layer_size, activation=tf.nn.relu) + + +# get the output +x = tf.nn.embedding_lookup(tfWe, inputs) + +# converts x from a tensor of shape N x T x D +# into a list of length T, where each element is a tensor of shape N x D +x = tf.unstack(x, sequence_length, 1) + +# get the rnn output +outputs, states = get_rnn_output(rnn_unit, x, dtype=tf.float32) + + +# outputs are now of size (T, N, M) +# so make it (N, T, M) +outputs = tf.transpose(outputs, (1, 0, 2)) +outputs = tf.reshape(outputs, (sequence_length*num_samples, hidden_layer_size)) # NT x M + +# Linear activation, using rnn inner loop last output +logits = tf.matmul(outputs, tfWo) + tfbo # NT x K +predictions = tf.argmax(logits, 1) +predict_op = tf.reshape(predictions, (num_samples, sequence_length)) +labels_flat = tf.reshape(targets, [-1]) + +cost_op = tf.reduce_mean( + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, + labels=labels_flat + ) +) +train_op = 
tf.train.AdamOptimizer(learning_rate).minimize(cost_op) + + + + +# init stuff +sess = tf.InteractiveSession() +init = tf.global_variables_initializer() +sess.run(init) + + +# training loop +costs = [] +n_batches = len(Ytrain) // batch_size +for i in range(epochs): + n_total = 0 + n_correct = 0 + + t0 = datetime.now() + Xtrain, Ytrain = shuffle(Xtrain, Ytrain) + cost = 0 + + for j in range(n_batches): + x = Xtrain[j*batch_size:(j+1)*batch_size] + y = Ytrain[j*batch_size:(j+1)*batch_size] + + # get the cost, predictions, and perform a gradient descent step + c, p, _ = sess.run( + (cost_op, predict_op, train_op), + feed_dict={inputs: x, targets: y}) + cost += c + + # calculate the accuracy + for yi, pi in zip(y, p): + # we don't care about the padded entries so ignore them + yii = yi[yi > 0] + pii = pi[yi > 0] + n_correct += np.sum(yii == pii) + n_total += len(yii) + + # print stuff out periodically + if j % 10 == 0: + sys.stdout.write( + "j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % + (j, n_batches, float(n_correct)/n_total, cost) + ) + sys.stdout.flush() + + # get test acc. too + p = sess.run(predict_op, feed_dict={inputs: Xtest, targets: Ytest}) + n_test_correct = 0 + n_test_total = 0 + for yi, pi in zip(Ytest, p): + yii = yi[yi > 0] + pii = pi[yi > 0] + n_test_correct += np.sum(yii == pii) + n_test_total += len(yii) + test_acc = float(n_test_correct) / n_test_total + + print( + "i:", i, "cost:", "%.4f" % cost, + "train acc:", "%.4f" % (float(n_correct)/n_total), + "test acc:", "%.4f" % test_acc, + "time for epoch:", (datetime.now() - t0) + ) + costs.append(cost) + +plt.plot(costs) +plt.show() + + diff --git a/nlp_class2/pos_ner_keras.py b/nlp_class2/pos_ner_keras.py new file mode 100644 index 00000000..a150fc29 --- /dev/null +++ b/nlp_class2/pos_ner_keras.py @@ -0,0 +1,224 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import matplotlib.pyplot as plt +import os +import sys +sys.path.append(os.path.abspath('..')) +from pos_baseline import get_data +from sklearn.utils import shuffle +from util import init_weight +from datetime import datetime +from sklearn.metrics import f1_score + +from keras.models import Model +from keras.layers import Input, Dense, Embedding, LSTM, GRU +from keras.preprocessing.sequence import pad_sequences +from keras.preprocessing.text import Tokenizer +from keras.optimizers import Adam + + +MAX_VOCAB_SIZE = 20000 +MAX_TAGS = 100 + + + +def get_data_pos(split_sequences=False): + if not os.path.exists('chunking'): + print("Please create a folder in your local directory called 'chunking'") + print("train.txt and test.txt should be stored in there.") + print("Please check the comments to get the download link.") + exit() + elif not os.path.exists('chunking/train.txt'): + print("train.txt is not in chunking/train.txt") + print("Please check the comments to get the download link.") + exit() + elif not os.path.exists('chunking/test.txt'): + print("test.txt is not in chunking/test.txt") + print("Please check the comments to get the download link.") + exit() + + Xtrain = [] + Ytrain = [] + currentX = [] + currentY = [] + for line in open('chunking/train.txt'): + line = line.rstrip() + if line: + r = line.split() + word, tag, _ = r + 
currentX.append(word) + + currentY.append(tag) + elif split_sequences: + Xtrain.append(currentX) + Ytrain.append(currentY) + currentX = [] + currentY = [] + + if not split_sequences: + Xtrain = currentX + Ytrain = currentY + + # load and score test data + Xtest = [] + Ytest = [] + currentX = [] + currentY = [] + for line in open('chunking/test.txt'): + line = line.rstrip() + if line: + r = line.split() + word, tag, _ = r + currentX.append(word) + currentY.append(tag) + elif split_sequences: + Xtest.append(currentX) + Ytest.append(currentY) + currentX = [] + currentY = [] + if not split_sequences: + Xtest = currentX + Ytest = currentY + + return Xtrain, Ytrain, Xtest, Ytest + + +def get_data_ner(split_sequences=False): + Xtrain = [] + Ytrain = [] + currentX = [] + currentY = [] + for line in open('ner.txt'): + line = line.rstrip() + if line: + r = line.split() + word, tag = r + word = word.lower() + currentX.append(word) + currentY.append(tag) + elif split_sequences: + Xtrain.append(currentX) + Ytrain.append(currentY) + currentX = [] + currentY = [] + + if not split_sequences: + Xtrain = currentX + Ytrain = currentY + + print("number of samples:", len(Xtrain)) + Xtrain, Ytrain = shuffle(Xtrain, Ytrain) + Ntest = int(0.3*len(Xtrain)) + Xtest = Xtrain[:Ntest] + Ytest = Ytrain[:Ntest] + Xtrain = Xtrain[Ntest:] + Ytrain = Ytrain[Ntest:] + return Xtrain, Ytrain, Xtest, Ytest + + + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = get_data_ner(split_sequences=True) + + +# convert the sentences (strings) into integers +tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE) +tokenizer.fit_on_texts(Xtrain) +Xtrain = tokenizer.texts_to_sequences(Xtrain) +Xtest = tokenizer.texts_to_sequences(Xtest) + +# get word -> integer mapping +word2idx = tokenizer.word_index +print('Found %s unique tokens.' % len(word2idx)) +vocab_size = min(MAX_VOCAB_SIZE, len(word2idx) + 1) + + +# convert the tags (strings) into integers +tokenizer2 = Tokenizer(num_words=MAX_TAGS) +tokenizer2.fit_on_texts(Ytrain) +Ytrain = tokenizer2.texts_to_sequences(Ytrain) +Ytest = tokenizer2.texts_to_sequences(Ytest) + +# get tag -> integer mapping +tag2idx = tokenizer2.word_index +print('Found %s unique tags.' 
% len(tag2idx)) +num_tags = min(MAX_TAGS, len(tag2idx) + 1) + + +# pad sequences +sequence_length = max(len(x) for x in Xtrain + Xtest) +Xtrain = pad_sequences(Xtrain, maxlen=sequence_length) +Ytrain = pad_sequences(Ytrain, maxlen=sequence_length) +Xtest = pad_sequences(Xtest, maxlen=sequence_length) +Ytest = pad_sequences(Ytest, maxlen=sequence_length) +print("Xtrain.shape:", Xtrain.shape) +print("Ytrain.shape:", Ytrain.shape) + + +# one-hot the targets +Ytrain_onehot = np.zeros((len(Ytrain), sequence_length, num_tags), dtype='float32') +for n, sample in enumerate(Ytrain): + for t, tag in enumerate(sample): + Ytrain_onehot[n, t, tag] = 1 + +Ytest_onehot = np.zeros((len(Ytest), sequence_length, num_tags), dtype='float32') +for n, sample in enumerate(Ytest): + for t, tag in enumerate(sample): + Ytest_onehot[n, t, tag] = 1 + + + +# training config +epochs = 30 +batch_size = 32 +hidden_layer_size = 10 +embedding_dim = 10 + + + + +# build the model +input_ = Input(shape=(sequence_length,)) +x = Embedding(vocab_size, embedding_dim)(input_) +x = GRU(hidden_layer_size, return_sequences=True)(x) +output = Dense(num_tags, activation='softmax')(x) + + +model = Model(input_, output) +model.compile( + loss='categorical_crossentropy', + optimizer=Adam(lr=1e-2), + metrics=['accuracy'] +) + + +print('Training model...') +r = model.fit( + Xtrain, + Ytrain_onehot, + batch_size=batch_size, + epochs=epochs, + validation_data=(Xtest, Ytest_onehot) +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + diff --git a/nlp_class2/pos_tf.py b/nlp_class2/pos_tf.py index d043962a..5370dabc 100644 --- a/nlp_class2/pos_tf.py +++ b/nlp_class2/pos_tf.py @@ -111,7 +111,7 @@ def flatten(l): # training config epochs = 20 -learning_rate = 1e-4 +learning_rate = 1e-2 mu = 0.99 batch_size = 32 hidden_layer_size = 10 @@ -179,7 +179,7 @@ def flatten(l): labels=labels_flat ) ) -train_op = tf.train.AdamOptimizer(1e-2).minimize(cost_op) +train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost_op) From 46b561971596c4f21cd7ac3d75213c12ddf3b7f1 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 27 May 2018 12:45:20 -0400 Subject: [PATCH 034/329] update --- rl/approx_q_learning.py | 190 ++++++++++++++++++++++++++++++++++++++++ rl/q_learning.py | 1 + 2 files changed, 191 insertions(+) create mode 100644 rl/approx_q_learning.py diff --git a/rl/approx_q_learning.py b/rl/approx_q_learning.py new file mode 100644 index 00000000..c3c1a35d --- /dev/null +++ b/rl/approx_q_learning.py @@ -0,0 +1,190 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import matplotlib.pyplot as plt +from grid_world import standard_grid, negative_grid +from iterative_policy_evaluation import print_values, print_policy +from monte_carlo_es import max_dict +from sarsa import random_action, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS + +SA2IDX = {} +IDX = 0 + +class Model: + def __init__(self): + self.theta = np.random.randn(25) / np.sqrt(25) + # if we use SA2IDX, a one-hot encoding for every (s,a) pair + # in reality we wouldn't want to do this 
b/c we have just + # as many params as before + # print "D:", IDX + # self.theta = np.random.randn(IDX) / np.sqrt(IDX) + + def sa2x(self, s, a): + # NOTE: using just (r, c, r*c, u, d, l, r, 1) is not expressive enough + return np.array([ + s[0] - 1 if a == 'U' else 0, + s[1] - 1.5 if a == 'U' else 0, + (s[0]*s[1] - 3)/3 if a == 'U' else 0, + (s[0]*s[0] - 2)/2 if a == 'U' else 0, + (s[1]*s[1] - 4.5)/4.5 if a == 'U' else 0, + 1 if a == 'U' else 0, + s[0] - 1 if a == 'D' else 0, + s[1] - 1.5 if a == 'D' else 0, + (s[0]*s[1] - 3)/3 if a == 'D' else 0, + (s[0]*s[0] - 2)/2 if a == 'D' else 0, + (s[1]*s[1] - 4.5)/4.5 if a == 'D' else 0, + 1 if a == 'D' else 0, + s[0] - 1 if a == 'L' else 0, + s[1] - 1.5 if a == 'L' else 0, + (s[0]*s[1] - 3)/3 if a == 'L' else 0, + (s[0]*s[0] - 2)/2 if a == 'L' else 0, + (s[1]*s[1] - 4.5)/4.5 if a == 'L' else 0, + 1 if a == 'L' else 0, + s[0] - 1 if a == 'R' else 0, + s[1] - 1.5 if a == 'R' else 0, + (s[0]*s[1] - 3)/3 if a == 'R' else 0, + (s[0]*s[0] - 2)/2 if a == 'R' else 0, + (s[1]*s[1] - 4.5)/4.5 if a == 'R' else 0, + 1 if a == 'R' else 0, + 1 + ]) + # if we use SA2IDX, a one-hot encoding for every (s,a) pair + # in reality we wouldn't want to do this b/c we have just + # as many params as before + # x = np.zeros(len(self.theta)) + # idx = SA2IDX[s][a] + # x[idx] = 1 + # return x + + def predict(self, s, a): + x = self.sa2x(s, a) + return self.theta.dot(x) + + def grad(self, s, a): + return self.sa2x(s, a) + + +def getQs(model, s): + # we need Q(s,a) to choose an action + # i.e. a = argmax[a]{ Q(s,a) } + Qs = {} + for a in ALL_POSSIBLE_ACTIONS: + q_sa = model.predict(s, a) + Qs[a] = q_sa + return Qs + + +if __name__ == '__main__': + # NOTE: if we use the standard grid, there's a good chance we will end up with + # suboptimal policies + # e.g. + # --------------------------- + # R | R | R | | + # --------------------------- + # R* | | U | | + # --------------------------- + # U | R | U | L | + # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. + # we'll either end up staying in the same spot, or back to the start (2,0), at which + # point we whould then just go back up, or at (0,0), at which point we can continue + # on right. + # instead, let's penalize each movement so the agent will find a shorter route. + # + # grid = standard_grid() + grid = negative_grid(step_cost=-0.1) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # no policy initialization, we will derive our policy from most recent Q + # enumerate all (s,a) pairs, each will have its own weight in our "dumb" model + # essentially each weight will be a measure of Q(s,a) itself + states = grid.all_states() + for s in states: + SA2IDX[s] = {} + for a in ALL_POSSIBLE_ACTIONS: + SA2IDX[s][a] = IDX + IDX += 1 + + # initialize model + model = Model() + + # repeat until convergence + t = 1.0 + t2 = 1.0 + deltas = [] + for it in range(20000): + if it % 100 == 0: + t += 0.01 + t2 += 0.01 + if it % 1000 == 0: + print("it:", it) + alpha = ALPHA / t2 + + # instead of 'generating' an epsiode, we will PLAY + # an episode within this loop + s = (2, 0) # start state + grid.set_state(s) + + # get Q(s) so we can choose the first action + Qs = getQs(model, s) + + # the first (s, r) tuple is the state we start in and 0 + # (since we don't get a reward) for simply starting the game + # the last (s, r) tuple is the terminal state and the final reward + # the value for the terminal state is by definition 0, so we don't + # care about updating it. 
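+    # (a worked form of the update applied inside the while-loop below, assuming
+    #  the linear model Q(s,a) = theta.dot(x(s,a)) defined above; semi-gradient
+    #  Q-learning moves theta by
+    #      theta += alpha * (r + GAMMA * max_a' Q(s',a') - Q(s,a)) * x(s,a)
+    #  since the gradient of Q(s,a) w.r.t. theta is just the feature vector
+    #  x(s,a); when s' is terminal the target is r alone, as Q(terminal,.) = 0)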
+ a = max_dict(Qs)[0] + a = random_action(a, eps=0.5/t) # epsilon-greedy + biggest_change = 0 + while not grid.game_over(): + r = grid.move(a) + s2 = grid.current_state() + + # we need the next action as well since Q(s,a) depends on Q(s',a') + # if s2 not in policy then it's a terminal state, all Q are 0 + old_theta = model.theta.copy() + if grid.is_terminal(s2): + model.theta += alpha*(r - model.predict(s, a))*model.grad(s, a) + else: + # not terminal + Qs2 = getQs(model, s2) + a2, maxQs2a2 = max_dict(Qs2) + a2 = random_action(a2, eps=0.5/t) # epsilon-greedy + + # we will update Q(s,a) AS we experience the episode + model.theta += alpha*(r + GAMMA*maxQs2a2 - model.predict(s, a))*model.grad(s, a) + + # next state becomes current state + s = s2 + a = a2 + + biggest_change = max(biggest_change, np.abs(model.theta - old_theta).sum()) + deltas.append(biggest_change) + + plt.plot(deltas) + plt.show() + + # determine the policy from Q* + # find V* from Q* + policy = {} + V = {} + Q = {} + for s in grid.actions.keys(): + Qs = getQs(model, s) + Q[s] = Qs + a, max_q = max_dict(Qs) + policy[s] = a + V[s] = max_q + + print("values:") + print_values(V, grid) + print("policy:") + print_policy(policy, grid) diff --git a/rl/q_learning.py b/rl/q_learning.py index 898069d8..ace032d6 100644 --- a/rl/q_learning.py +++ b/rl/q_learning.py @@ -105,6 +105,7 @@ # next state becomes current state s = s2 + a = a2 deltas.append(biggest_change) From 3a857874fa947d4c93bdcde61a5b96f5ff0ac383 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 27 May 2018 16:35:18 -0400 Subject: [PATCH 035/329] separate out tf version --- nlp_class2/glove_tf.py | 202 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 nlp_class2/glove_tf.py diff --git a/nlp_class2/glove_tf.py b/nlp_class2/glove_tf.py new file mode 100644 index 00000000..c86afff1 --- /dev/null +++ b/nlp_class2/glove_tf.py @@ -0,0 +1,202 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +import json +import numpy as np +import tensorflow as tf +import matplotlib.pyplot as plt + +from datetime import datetime +from sklearn.utils import shuffle +from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab + + +class Glove: + def __init__(self, D, V, context_sz): + self.D = D + self.V = V + self.context_sz = context_sz + + def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False, use_theano=False, use_tensorflow=False): + # build co-occurrence matrix + # paper calls it X, so we will call it X, instead of calling + # the training data X + # TODO: would it be better to use a sparse matrix? + t0 = datetime.now() + V = self.V + D = self.D + + if not os.path.exists(cc_matrix): + X = np.zeros((V, V)) + N = len(sentences) + print("number of sentences to process:", N) + it = 0 + for sentence in sentences: + it += 1 + if it % 10000 == 0: + print("processed", it, "/", N) + n = len(sentence) + for i in range(n): + # i is not the word index!!! + # j is not the word index!!! 
+ # i just points to which element of the sequence (sentence) we're looking at + wi = sentence[i] + + start = max(0, i - self.context_sz) + end = min(n, i + self.context_sz) + + # we can either choose only one side as context, or both + # here we are doing both + + # make sure "start" and "end" tokens are part of some context + # otherwise their f(X) will be 0 (denominator in bias update) + if i - self.context_sz < 0: + points = 1.0 / (i + 1) + X[wi,0] += points + X[0,wi] += points + if i + self.context_sz > n: + points = 1.0 / (n - i) + X[wi,1] += points + X[1,wi] += points + + # left side + for j in range(start, i): + wj = sentence[j] + points = 1.0 / (i - j) # this is +ve + X[wi,wj] += points + X[wj,wi] += points + + # right side + for j in range(i + 1, end): + wj = sentence[j] + points = 1.0 / (j - i) # this is +ve + X[wi,wj] += points + X[wj,wi] += points + + # save the cc matrix because it takes forever to create + np.save(cc_matrix, X) + else: + X = np.load(cc_matrix) + + print("max in X:", X.max()) + + # weighting + fX = np.zeros((V, V)) + fX[X < xmax] = (X[X < xmax] / float(xmax)) ** alpha + fX[X >= xmax] = 1 + + print("max in f(X):", fX.max()) + + # target + logX = np.log(X + 1) + + print("max in log(X):", logX.max()) + + print("time to build co-occurrence matrix:", (datetime.now() - t0)) + + # initialize weights + W = np.random.randn(V, D) / np.sqrt(V + D) + b = np.zeros(V) + U = np.random.randn(V, D) / np.sqrt(V + D) + c = np.zeros(V) + mu = logX.mean() + + # initialize weights, inputs, targets placeholders + tfW = tf.Variable(W.astype(np.float32)) + tfb = tf.Variable(b.reshape(V, 1).astype(np.float32)) + tfU = tf.Variable(U.astype(np.float32)) + tfc = tf.Variable(c.reshape(1, V).astype(np.float32)) + tfLogX = tf.placeholder(tf.float32, shape=(V, V)) + tffX = tf.placeholder(tf.float32, shape=(V, V)) + + delta = tf.matmul(tfW, tf.transpose(tfU)) + tfb + tfc + mu - tfLogX + cost = tf.reduce_sum(tffX * delta * delta) + regularized_cost = cost + for param in (tfW, tfb, tfU, tfc): + regularized_cost += reg*tf.reduce_sum(param * param) + + train_op = tf.train.MomentumOptimizer( + learning_rate, + momentum=0.9 + ).minimize(regularized_cost) + # train_op = tf.train.AdamOptimizer(1e-3).minimize(regularized_cost) + init = tf.global_variables_initializer() + session = tf.InteractiveSession() + session.run(init) + + costs = [] + sentence_indexes = range(len(sentences)) + for epoch in range(epochs): + c, _ = session.run((cost, train_op), feed_dict={tfLogX: logX, tffX: fX}) + print("epoch:", epoch, "cost:", c) + costs.append(c) + + # save for future calculations + self.W, self.U = session.run([tfW, tfU]) + + plt.plot(costs) + plt.show() + + def save(self, fn): + # function word_analogies expects a (V,D) matrx and a (D,V) matrix + arrays = [self.W, self.U.T] + np.savez(fn, *arrays) + + +def main(we_file, w2i_file, use_brown=True, n_files=50): + if use_brown: + cc_matrix = "cc_matrix_brown.npy" + else: + cc_matrix = "cc_matrix_%s.npy" % n_files + + # hacky way of checking if we need to re-load the raw data or not + # remember, only the co-occurrence matrix is needed for training + if os.path.exists(cc_matrix): + with open(w2i_file) as f: + word2idx = json.load(f) + sentences = [] # dummy - we won't actually use it + else: + if use_brown: + keep_words = set([ + 'king', 'man', 'woman', + 'france', 'paris', 'london', 'rome', 'italy', 'britain', 'england', + 'french', 'english', 'japan', 'japanese', 'chinese', 'italian', + 'australia', 'australian', 'december', 'november', 'june', + 'january', 
'february', 'march', 'april', 'may', 'july', 'august', + 'september', 'october', + ]) + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=5000, keep_words=keep_words) + else: + sentences, word2idx = get_wikipedia_data(n_files=n_files, n_vocab=2000) + + with open(w2i_file, 'w') as f: + json.dump(word2idx, f) + + V = len(word2idx) + model = Glove(100, V, 10) + model.fit(sentences, cc_matrix=cc_matrix, epochs=200) + model.save(we_file) + + +if __name__ == '__main__': + we = 'glove_model_50.npz' + w2i = 'glove_word2idx_50.json' + main(we, w2i, use_brown=False) + for concat in (True, False): + print("** concat:", concat) + find_analogies('king', 'man', 'woman', concat, we, w2i) + find_analogies('france', 'paris', 'london', concat, we, w2i) + find_analogies('france', 'paris', 'rome', concat, we, w2i) + find_analogies('paris', 'france', 'italy', concat, we, w2i) + find_analogies('france', 'french', 'english', concat, we, w2i) + find_analogies('japan', 'japanese', 'chinese', concat, we, w2i) + find_analogies('japan', 'japanese', 'italian', concat, we, w2i) + find_analogies('japan', 'japanese', 'australian', concat, we, w2i) + find_analogies('december', 'november', 'june', concat, we, w2i) From a4718a0a1e6535371189167c82d3ae7b82237bf4 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 27 May 2018 16:57:06 -0400 Subject: [PATCH 036/329] split out theano --- nlp_class2/glove_theano.py | 242 +++++++++++++++++++++++++++++++++++++ 1 file changed, 242 insertions(+) create mode 100644 nlp_class2/glove_theano.py diff --git a/nlp_class2/glove_theano.py b/nlp_class2/glove_theano.py new file mode 100644 index 00000000..1ab9a455 --- /dev/null +++ b/nlp_class2/glove_theano.py @@ -0,0 +1,242 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +import json +import numpy as np +import theano +import theano.tensor as T +import matplotlib.pyplot as plt + +from datetime import datetime +from sklearn.utils import shuffle +from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab + +# using ALS, what's the least # files to get correct analogies? +# use this for word2vec training to make it faster +# first tried 20 files --> not enough +# how about 30 files --> some correct but still not enough +# 40 files --> half right but 50 is better + + +def momentum_updates(cost, params, lr=1e-4, mu=0.9): + grads = T.grad(cost, params) + velocities = [theano.shared( + np.zeros_like(p.get_value()).astype(np.float32) + ) for p in params] + # updates = [(p, p - learning_rate*g) for p, g in zip(params, grads)] + updates = [] + for p, v, g in zip(params, velocities, grads): + newv = mu*v - lr*g + newp = p + newv + updates.append((p, newp)) + updates.append((v, newv)) + return updates + + +class Glove: + def __init__(self, D, V, context_sz): + self.D = D + self.V = V + self.context_sz = context_sz + + def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False, use_theano=False, use_tensorflow=False): + # build co-occurrence matrix + # paper calls it X, so we will call it X, instead of calling + # the training data X + # TODO: would it be better to use a sparse matrix? 
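+        # (one possible answer to the TODO, noted here only as a sketch: the counts
+        #  could be accumulated in a scipy.sparse.lil_matrix((V, V)) and persisted
+        #  with scipy.sparse.save_npz / load_npz instead of np.save / np.load;
+        #  the f(X) weighting and log target computed below would still be dense
+        #  V x V arrays, so this alone only reduces memory during counting)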
+ t0 = datetime.now() + V = self.V + D = self.D + + if not os.path.exists(cc_matrix): + X = np.zeros((V, V)) + N = len(sentences) + print("number of sentences to process:", N) + it = 0 + for sentence in sentences: + it += 1 + if it % 10000 == 0: + print("processed", it, "/", N) + n = len(sentence) + for i in range(n): + # i is not the word index!!! + # j is not the word index!!! + # i just points to which element of the sequence (sentence) we're looking at + wi = sentence[i] + + start = max(0, i - self.context_sz) + end = min(n, i + self.context_sz) + + # we can either choose only one side as context, or both + # here we are doing both + + # make sure "start" and "end" tokens are part of some context + # otherwise their f(X) will be 0 (denominator in bias update) + if i - self.context_sz < 0: + points = 1.0 / (i + 1) + X[wi,0] += points + X[0,wi] += points + if i + self.context_sz > n: + points = 1.0 / (n - i) + X[wi,1] += points + X[1,wi] += points + + # left side + for j in range(start, i): + wj = sentence[j] + points = 1.0 / (i - j) # this is +ve + X[wi,wj] += points + X[wj,wi] += points + + # right side + for j in range(i + 1, end): + wj = sentence[j] + points = 1.0 / (j - i) # this is +ve + X[wi,wj] += points + X[wj,wi] += points + + # save the cc matrix because it takes forever to create + np.save(cc_matrix, X) + else: + X = np.load(cc_matrix) + + print("max in X:", X.max()) + + # weighting + fX = np.zeros((V, V)) + fX[X < xmax] = (X[X < xmax] / float(xmax)) ** alpha + fX[X >= xmax] = 1 + + print("max in f(X):", fX.max()) + + # target + logX = np.log(X + 1) + + # cast + fX = fX.astype(np.float32) + logX = logX.astype(np.float32) + + print("max in log(X):", logX.max()) + + print("time to build co-occurrence matrix:", (datetime.now() - t0)) + + # initialize weights + W = np.random.randn(V, D) / np.sqrt(V + D) + b = np.zeros(V) + U = np.random.randn(V, D) / np.sqrt(V + D) + c = np.zeros(V) + mu = logX.mean() + + # initialize weights, inputs, targets placeholders + thW = theano.shared(W.astype(np.float32)) + thb = theano.shared(b.astype(np.float32)) + thU = theano.shared(U.astype(np.float32)) + thc = theano.shared(c.astype(np.float32)) + thLogX = T.matrix('logX') + thfX = T.matrix('fX') + + params = [thW, thb, thU, thc] + + thDelta = thW.dot(thU.T) + T.reshape(thb, (V, 1)) + T.reshape(thc, (1, V)) + mu - thLogX + thCost = ( thfX * thDelta * thDelta ).sum() + + # regularization + regularized_cost = thCost + reg*((thW * thW).sum() + (thU * thU).sum()) + + # grads = T.grad(regularized_cost, params) + # updates = [(p, p - learning_rate*g) for p, g in zip(params, grads)] + updates = momentum_updates(regularized_cost, params, learning_rate) + + train_op = theano.function( + inputs=[thfX, thLogX], + updates=updates, + ) + + cost_op = theano.function(inputs=[thfX, thLogX], outputs=thCost) + + costs = [] + sentence_indexes = range(len(sentences)) + for epoch in range(epochs): + train_op(fX, logX) + cost = cost_op(fX, logX) + costs.append(cost) + print("epoch:", epoch, "cost:", cost) + + + self.W = thW.get_value() + self.U = thU.get_value() + + plt.plot(costs) + plt.show() + + def save(self, fn): + # function word_analogies expects a (V,D) matrx and a (D,V) matrix + arrays = [self.W, self.U.T] + np.savez(fn, *arrays) + + +def main(we_file, w2i_file, use_brown=True, n_files=50): + if use_brown: + cc_matrix = "cc_matrix_brown.npy" + else: + cc_matrix = "cc_matrix_%s.npy" % n_files + + # hacky way of checking if we need to re-load the raw data or not + # remember, only the co-occurrence matrix is 
needed for training + if os.path.exists(cc_matrix): + with open(w2i_file) as f: + word2idx = json.load(f) + sentences = [] # dummy - we won't actually use it + else: + if use_brown: + keep_words = set([ + 'king', 'man', 'woman', + 'france', 'paris', 'london', 'rome', 'italy', 'britain', 'england', + 'french', 'english', 'japan', 'japanese', 'chinese', 'italian', + 'australia', 'australian', 'december', 'november', 'june', + 'january', 'february', 'march', 'april', 'may', 'july', 'august', + 'september', 'october', + ]) + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=5000, keep_words=keep_words) + else: + sentences, word2idx = get_wikipedia_data(n_files=n_files, n_vocab=2000) + + with open(w2i_file, 'w') as f: + json.dump(word2idx, f) + + V = len(word2idx) + model = Glove(100, V, 10) + model.fit( + sentences, + cc_matrix=cc_matrix, + learning_rate=1e-4, + reg=0.1, + epochs=200, + ) + model.save(we_file) + + +if __name__ == '__main__': + we = 'glove_model_50.npz' + w2i = 'glove_word2idx_50.json' + # we = 'glove_model_brown.npz' + # w2i = 'glove_word2idx_brown.json' + main(we, w2i, use_brown=False) + for concat in (True, False): + print("** concat:", concat) + find_analogies('king', 'man', 'woman', concat, we, w2i) + find_analogies('france', 'paris', 'london', concat, we, w2i) + find_analogies('france', 'paris', 'rome', concat, we, w2i) + find_analogies('paris', 'france', 'italy', concat, we, w2i) + find_analogies('france', 'french', 'english', concat, we, w2i) + find_analogies('japan', 'japanese', 'chinese', concat, we, w2i) + find_analogies('japan', 'japanese', 'italian', concat, we, w2i) + find_analogies('japan', 'japanese', 'australian', concat, we, w2i) + find_analogies('december', 'november', 'june', concat, we, w2i) From 2ca61f71afc8c98b00898a2bde5d9d7b5263fb96 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 27 May 2018 17:22:25 -0400 Subject: [PATCH 037/329] update --- nlp_class2/glove.py | 146 +++++++++++-------------------------- nlp_class2/glove_theano.py | 9 --- 2 files changed, 44 insertions(+), 111 deletions(-) diff --git a/nlp_class2/glove.py b/nlp_class2/glove.py index cd035dc4..10f370d8 100644 --- a/nlp_class2/glove.py +++ b/nlp_class2/glove.py @@ -10,9 +10,6 @@ import os import json import numpy as np -import theano -import theano.tensor as T -import tensorflow as tf import matplotlib.pyplot as plt from datetime import datetime @@ -115,50 +112,6 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, c = np.zeros(V) mu = logX.mean() - if use_theano: - # initialize weights, inputs, targets placeholders - thW = theano.shared(W) - thb = theano.shared(b) - thU = theano.shared(U) - thc = theano.shared(c) - thLogX = T.matrix('logX') - thfX = T.matrix('fX') - - params = [thW, thb, thU, thc] - - thDelta = thW.dot(thU.T) + T.reshape(thb, (V, 1)) + T.reshape(thc, (1, V)) + mu - thLogX - thCost = ( thfX * thDelta * thDelta ).sum() - - # regularization - thCost += reg*( (thW * thW).sum() + (thU * thU).sum() + (thb * thb).sum() + (thc * thc).sum()) - - grads = T.grad(thCost, params) - - updates = [(p, p - learning_rate*g) for p, g in zip(params, grads)] - - train_op = theano.function( - inputs=[thfX, thLogX], - updates=updates, - ) - - elif use_tensorflow: - # initialize weights, inputs, targets placeholders - tfW = tf.Variable(W.astype(np.float32)) - tfb = tf.Variable(b.reshape(V, 1).astype(np.float32)) - tfU = tf.Variable(U.astype(np.float32)) - tfc = tf.Variable(c.reshape(1, V).astype(np.float32)) - tfLogX = tf.placeholder(tf.float32, 
shape=(V, V)) - tffX = tf.placeholder(tf.float32, shape=(V, V)) - - delta = tf.matmul(tfW, tf.transpose(tfU)) + tfb + tfc + mu - tfLogX - cost = tf.reduce_sum(tffX * delta * delta) - for param in (tfW, tfb, tfU, tfc): - cost += reg*tf.reduce_sum(param * param) - - train_op = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(cost) - init = tf.global_variables_initializer() - session = tf.InteractiveSession() - session.run(init) costs = [] sentence_indexes = range(len(sentences)) @@ -170,51 +123,38 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, if gd: # gradient descent method + # update W + # oldW = W.copy() + for i in range(V): + # for j in range(V): + # W[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*U[j] + W[i] -= learning_rate*(fX[i,:]*delta[i,:]).dot(U) + W -= learning_rate*reg*W + # print "updated W" + + # update b + for i in range(V): + # for j in range(V): + # b[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j]) + b[i] -= learning_rate*fX[i,:].dot(delta[i,:]) + # b -= learning_rate*reg*b + # print "updated b" + + # update U + for j in range(V): + # for i in range(V): + # U[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*W[i] + U[j] -= learning_rate*(fX[:,j]*delta[:,j]).dot(W) + U -= learning_rate*reg*U + # print "updated U" - if use_theano: - train_op(fX, logX) - W = thW.get_value() - b = thb.get_value() - U = thU.get_value() - c = thc.get_value() - - elif use_tensorflow: - session.run(train_op, feed_dict={tfLogX: logX, tffX: fX}) - W, b, U, c = session.run([tfW, tfb, tfU, tfc]) - - else: - # update W - oldW = W.copy() - for i in range(V): - # for j in range(V): - # W[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*U[j] - W[i] -= learning_rate*(fX[i,:]*delta[i,:]).dot(U) - W -= learning_rate*reg*W - # print "updated W" - - # update b - for i in range(V): - # for j in range(V): - # b[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j]) - b[i] -= learning_rate*fX[i,:].dot(delta[i,:]) - b -= learning_rate*reg*b - # print "updated b" - - # update U - for j in range(V): - # for i in range(V): - # U[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*W[i] - U[j] -= learning_rate*(fX[:,j]*delta[:,j]).dot(oldW) - U -= learning_rate*reg*U - # print "updated U" - - # update c - for j in range(V): - # for i in range(V): - # c[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j]) - c[j] -= learning_rate*fX[:,j].dot(delta[:,j]) - c -= learning_rate*reg*c - # print "updated c" + # update c + for j in range(V): + # for i in range(V): + # c[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j]) + c[j] -= learning_rate*fX[:,j].dot(delta[:,j]) + # c -= learning_rate*reg*c + # print "updated c" else: # ALS method @@ -321,17 +261,19 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): V = len(word2idx) model = Glove(100, V, 10) - model.fit(sentences, cc_matrix=cc_matrix, epochs=20) # ALS - # model.fit( - # sentences, - # cc_matrix=cc_matrix, - # learning_rate=3e-4, - # reg=0.1, - # epochs=10, - # gd=True, - # use_theano=False, - # use_tensorflow=True, - # ) + + # alternating least squares method + # model.fit(sentences, cc_matrix=cc_matrix, epochs=20) + + # gradient descent method + model.fit( + sentences, + cc_matrix=cc_matrix, + learning_rate=5e-4, + reg=0.1, + epochs=500, + gd=True, + ) model.save(we_file) diff --git a/nlp_class2/glove_theano.py 
b/nlp_class2/glove_theano.py index 1ab9a455..33705ca8 100644 --- a/nlp_class2/glove_theano.py +++ b/nlp_class2/glove_theano.py @@ -18,19 +18,12 @@ from sklearn.utils import shuffle from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab -# using ALS, what's the least # files to get correct analogies? -# use this for word2vec training to make it faster -# first tried 20 files --> not enough -# how about 30 files --> some correct but still not enough -# 40 files --> half right but 50 is better - def momentum_updates(cost, params, lr=1e-4, mu=0.9): grads = T.grad(cost, params) velocities = [theano.shared( np.zeros_like(p.get_value()).astype(np.float32) ) for p in params] - # updates = [(p, p - learning_rate*g) for p, g in zip(params, grads)] updates = [] for p, v, g in zip(params, velocities, grads): newv = mu*v - lr*g @@ -150,8 +143,6 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # regularization regularized_cost = thCost + reg*((thW * thW).sum() + (thU * thU).sum()) - # grads = T.grad(regularized_cost, params) - # updates = [(p, p - learning_rate*g) for p, g in zip(params, grads)] updates = momentum_updates(regularized_cost, params, learning_rate) train_op = theano.function( From 560d53de8961a1c195e033732b2343fe3b020389 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 29 May 2018 04:11:39 -0400 Subject: [PATCH 038/329] update --- nlp_class2/pmi.py | 296 ++++++++++++++++++++++++++++++++++++++ nlp_class2/rntn_theano.py | 62 ++++++-- 2 files changed, 344 insertions(+), 14 deletions(-) create mode 100644 nlp_class2/pmi.py diff --git a/nlp_class2/pmi.py b/nlp_class2/pmi.py new file mode 100644 index 00000000..13b5e1d6 --- /dev/null +++ b/nlp_class2/pmi.py @@ -0,0 +1,296 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import os, sys +import string +import numpy as np +import matplotlib.pyplot as plt +from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz +from scipy.spatial.distance import cosine as cos_dist +from sklearn.metrics.pairwise import pairwise_distances +from glob import glob + + +# input files +files = glob('../large_files/enwiki*.txt') + + +# unfortunately these work different ways +def remove_punctuation_2(s): + return s.translate(None, string.punctuation) + +def remove_punctuation_3(s): + return s.translate(str.maketrans('','',string.punctuation)) + +if sys.version.startswith('2'): + remove_punctuation = remove_punctuation_2 +else: + remove_punctuation = remove_punctuation_3 + + +# max vocab size +V = 2000 + +# context size +context_size = 10 + +# word counts +all_word_counts = {} + +# get the top V words +num_lines = 0 +num_tokens = 0 +for f in files: + for line in open(f): + # don't count headers, structured data, lists, etc... 
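+        # (these prefixes are assumed to mark wiki formatting: '[' links and
+        #  categories, '*' and '-' list items, '|', '{', '}' tables and templates,
+        #  '=' section headings)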
+ if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'): + num_lines += 1 + for word in remove_punctuation(line).lower().split(): + num_tokens += 1 + if word not in all_word_counts: + all_word_counts[word] = 0 + all_word_counts[word] += 1 +print("num_lines:", num_lines) +print("num_tokens:", num_tokens) + + +# words I really want to keep +keep_words = [ + 'king', 'man', 'queen', 'woman', + 'heir', 'heiress', 'prince', 'princess', + 'nephew', 'niece', 'uncle', 'aunt', + 'husband', 'wife', 'brother', 'sister', + 'tokyo', 'beijing', 'dallas', 'texas', + 'january', 'february', 'march', + 'april', 'may', 'june', + 'july', 'august', 'september', + 'october', 'november', 'december', + 'actor', 'actress', + 'rice', 'bread', 'miami', 'florida', + 'walk', 'walking', 'swim', 'swimming', +] +for w in keep_words: + all_word_counts[w] = float('inf') + + +# sort in descending order +all_word_counts = sorted(all_word_counts.items(), key=lambda x: x[1], reverse=True) + +# keep just the top V words +# save a slot for +V = min(V, len(all_word_counts)) +top_words = [w for w, count in all_word_counts[:V-1]] + [''] +# TODO: try it without UNK at all + +# reverse the array to get word2idx mapping +word2idx = {w:i for i, w in enumerate(top_words)} +unk = word2idx[''] + +# for w in ('king', 'man', 'queen', 'woman', 'france', 'paris', \ +# 'london', 'england', 'italy', 'rome', \ +# 'france', 'french', 'english', 'england', \ +# 'japan', 'japanese', 'chinese', 'china', \ +# 'italian', 'australia', 'australian' \ +# 'japan', 'tokyo', 'china', 'beijing'): +# assert(w in word2idx) + + +if not os.path.exists('pmi_counts_%s.npz' % V): + # init counts + wc_counts = lil_matrix((V, V)) + + ### make PMI matrix + # add counts + k = 0 + # for line in open('../large_files/text8'): + for f in files: + for line in open(f): + # don't count headers, structured data, lists, etc... + if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'): + line_as_idx = [] + for word in remove_punctuation(line).lower().split(): + if word in word2idx: + idx = word2idx[word] + # line_as_idx.append(idx) + else: + idx = unk + # pass + line_as_idx.append(idx) + + for i, w in enumerate(line_as_idx): + # keep count + k += 1 + if k % 10000 == 0: + print("%s/%s" % (k, num_tokens)) + + start = max(0, i - context_size) + end = min(len(line_as_idx), i + context_size) + for c in line_as_idx[start:end]: + wc_counts[w, c] += 1 + print("Finished counting") + + save_npz('pmi_counts_%s.npz' % V, csr_matrix(wc_counts)) + +else: + wc_counts = load_npz('pmi_counts_%s.npz' % V) + + +# context counts get raised ^ 0.75 +c_counts = wc_counts.sum(axis=0).A.flatten() ** 0.75 +c_probs = c_counts / c_counts.sum() +c_probs = c_probs.reshape(1, V) + + +# PMI(w, c) = #(w, c) / #(w) / p(c) +pmi = wc_counts / wc_counts.sum(axis=1) / c_probs +print("type(pmi):", type(pmi)) +logX = np.log(pmi.A + 1) #+ np.log(100) +print("type(logX):", type(logX)) +logX[logX < 0] = 0 + + +### do alternating least squares + + +# latent dimension +D = 50 +reg = 0. 
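+# (a quick derivation of the closed-form ALS updates used in the loop below,
+#  assuming the objective
+#      J = sum_ij (W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])**2
+#          + reg * (|W|_F**2 + |U|_F**2)
+#  i.e. the unweighted analogue of the GloVe cost: setting dJ/dW[i] = 0 gives the
+#  V linear systems solved with np.linalg.solve below,
+#      (reg*np.eye(D) + U.T.dot(U)).dot(W[i]) = (logX[i,:] - b[i] - c - mu).dot(U)
+#  and symmetrically for each U[j]; the bias updates are mean residuals, e.g.
+#  b[i] = (logX[i,:] - W[i].dot(U.T) - c - mu).sum() / V when reg = 0)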
+ + +# initialize weights +W = np.random.randn(V, D) / np.sqrt(V + D) +b = np.zeros(V) +U = np.random.randn(V, D) / np.sqrt(V + D) +c = np.zeros(V) +mu = logX.mean() + + +costs = [] +for epoch in range(10): + print("epoch:", epoch) + delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX + cost = ( delta * delta ).sum() + costs.append(cost) + + # update W + for i in range(V): + matrix = reg*np.eye(D) + U.T.dot(U) + vector = (logX[i,:] - b[i] - c - mu).dot(U) + W[i] = np.linalg.solve(matrix, vector) + + # update b + for i in range(V): + numerator = (logX[i,:] - W[i].dot(U.T) - c - mu).sum() + b[i] = numerator / V #/ (1 + reg) + + # update U + for j in range(V): + matrix = reg*np.eye(D) + W.T.dot(W) + vector = (logX[:,j] - b - c[j] - mu).dot(W) + U[j] = np.linalg.solve(matrix, vector) + + # update c + for j in range(V): + numerator = (logX[:,j] - W.dot(U[j]) - b - mu).sum() + c[j] = numerator / V #/ (1 + reg) + +plt.plot(costs) +plt.show() + + + + +### test it +king = W[word2idx['king']] +man = W[word2idx['man']] +queen = W[word2idx['queen']] +woman = W[word2idx['woman']] + +vec = king - man + woman + +# find closest +# closest = None +# min_dist = float('inf') +# for i in range(len(W)): +# dist = cos_dist(W[i], vec) +# if dist < min_dist: +# closest = i +# min_dist = dist + +# set word embedding matrix +# W = (W + U) / 2 + +distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V) +idx = distances.argsort()[:10] + +print("closest 10:") +for i in idx: + print(top_words[i], distances[i]) + +print("dist to queen:", cos_dist(W[word2idx['queen']], vec)) + + + +def analogy(pos1, neg1, pos2, neg2): + # don't actually use pos2 in calculation, just print what's expected + print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2)) + for w in (pos1, neg1, pos2, neg2): + if w not in word2idx: + print("Sorry, %s not in word2idx" % w) + return + + p1 = W[word2idx[pos1]] + n1 = W[word2idx[neg1]] + p2 = W[word2idx[pos2]] + n2 = W[word2idx[neg2]] + + vec = p1 - n1 + n2 + + distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V) + idx = distances.argsort()[:10] + + # pick the best that's not p1, n1, or n2 + best_idx = -1 + keep_out = [word2idx[w] for w in (pos1, neg1, neg2)] + for i in idx: + if i not in keep_out: + best_idx = i + break + + print("got: %s - %s = %s - %s" % (pos1, neg1, top_words[best_idx], neg2)) + print("closest 10:") + for i in idx: + print(top_words[i], distances[i]) + + print("dist to %s:" % pos2, cos_dist(p2, vec)) + + +analogy('king', 'man', 'queen', 'woman') +analogy('miami', 'florida', 'dallas', 'texas') +# analogy('einstein', 'scientist', 'picasso', 'painter') +analogy('china', 'rice', 'england', 'bread') +analogy('man', 'woman', 'he', 'she') +analogy('man', 'woman', 'uncle', 'aunt') +analogy('man', 'woman', 'brother', 'sister') +analogy('man', 'woman', 'husband', 'wife') +analogy('man', 'woman', 'actor', 'actress') +analogy('man', 'woman', 'father', 'mother') +analogy('heir', 'heiress', 'prince', 'princess') +analogy('nephew', 'niece', 'uncle', 'aunt') +analogy('france', 'paris', 'japan', 'tokyo') +analogy('france', 'paris', 'china', 'beijing') +analogy('february', 'january', 'december', 'november') +analogy('france', 'paris', 'italy', 'rome') +analogy('paris', 'france', 'rome', 'italy') +analogy('france', 'french', 'england', 'english') +analogy('japan', 'japanese', 'china', 'chinese') +analogy('japan', 'japanese', 'italy', 'italian') +analogy('japan', 'japanese', 'australia', 'australian') +analogy('walk', 'walking', 
'swim', 'swimming') diff --git a/nlp_class2/rntn_theano.py b/nlp_class2/rntn_theano.py index d4fbb41d..3949a3c8 100644 --- a/nlp_class2/rntn_theano.py +++ b/nlp_class2/rntn_theano.py @@ -88,7 +88,7 @@ def __init__(self, V, D, K, activation=T.tanh): self.K = K self.f = activation - def fit(self, trees, reg=1e-3, epochs=8, train_inner_nodes=False): + def fit(self, trees, test_trees, reg=1e-3, epochs=8, train_inner_nodes=False): D = self.D V = self.V K = self.K @@ -156,7 +156,8 @@ def recurrence(n, hiddens, words, left, right): rcost = reg*T.sum([(p*p).sum() for p in self.params]) if train_inner_nodes: - cost = -T.mean(T.log(py_x[T.arange(labels.shape[0]), labels])) + rcost + relevant_labels = labels[labels >= 0] + cost = -T.mean(T.log(py_x[labels >= 0, relevant_labels])) + rcost else: cost = -T.mean(T.log(py_x[-1, labels[-1]])) + rcost @@ -178,14 +179,15 @@ def recurrence(n, hiddens, words, left, right): lr_ = 8e-3 # initial learning rate costs = [] sequence_indexes = range(N) - if train_inner_nodes: - n_total = sum(len(words) for words, _, _, _ in trees) - else: - n_total = N + # if train_inner_nodes: + # n_total = sum(len(words) for words, _, _, _ in trees) + # else: + # n_total = N for i in range(epochs): t0 = datetime.now() sequence_indexes = shuffle(sequence_indexes) n_correct = 0 + n_total = 0 cost = 0 it = 0 for j in sequence_indexes: @@ -198,20 +200,28 @@ def recurrence(n, hiddens, words, left, right): print(p.get_value().sum()) exit() cost += c - if train_inner_nodes: - n_correct += np.sum(p == lab) - else: - n_correct += (p[-1] == lab[-1]) + n_correct += (p[-1] == lab[-1]) + n_total += 1 it += 1 - if it % 1 == 0: + if it % 10 == 0: sys.stdout.write( "j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % (it, N, float(n_correct)/n_total, cost) ) sys.stdout.flush() + + # calculate the test score + n_test_correct = 0 + n_test_total = 0 + for words, left, right, lab in test_trees: + _, p = self.cost_predict_op(words, left, right, lab) + n_test_correct += (p[-1] == lab[-1]) + n_test_total += 1 + print( "i:", i, "cost:", cost, - "correct rate:", (float(n_correct)/n_total), + "train acc:", float(n_correct)/n_total, + "test acc:", float(n_test_correct)/n_test_total, "time for epoch:", (datetime.now() - t0) ) costs.append(cost) @@ -272,6 +282,7 @@ def tree2list(tree, parent_idx, is_binary=False): if tree.label > 2: label = 1 elif tree.label < 2: + # else: label = 0 else: label = -1 # we will eventually filter these out @@ -297,12 +308,35 @@ def main(is_binary=True): if is_binary: test = [t for t in test if t[3][-1] >= 0] # for filtering binary labels + # check imbalance + # pos = 0 + # neg = 0 + # mid = 0 + # label_counts = np.zeros(5) + # for t in train + test: + # words, left_child, right_child, labels = t + # # for l in labels: + # # if l == 0: + # # neg += 1 + # # elif l == 1: + # # pos += 1 + # # else: + # # mid += 1 + # for l in labels: + # label_counts[l] += 1 + # # print("pos / total:", float(pos) / (pos + neg + mid)) + # # print("mid / total:", float(mid) / (pos + neg + mid)) + # # print("neg / total:", float(neg) / (pos + neg + mid)) + # print("label proportions:", label_counts / label_counts.sum()) + # exit() + + train = shuffle(train) # train = train[:5000] # n_pos = sum(t[3][-1] for t in train) # print("n_pos train:", n_pos) test = shuffle(test) - test = test[:1000] + smalltest = test[:1000] # n_pos = sum(t[3][-1] for t in test) # print("n_pos test:", n_pos) @@ -312,7 +346,7 @@ def main(is_binary=True): K = 2 if is_binary else 5 model = RecursiveNN(V, D, K) - 
model.fit(train) + model.fit(train, smalltest, epochs=20, train_inner_nodes=True) print("train accuracy:", model.score(train)) print("test accuracy:", model.score(test)) print("train f1:", model.f1_score(train)) From dc18466ecd13495fb4757c72da44b71cf2c417f7 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 29 May 2018 04:12:03 -0400 Subject: [PATCH 039/329] update --- nlp_class2/pretrained.py | 113 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 nlp_class2/pretrained.py diff --git a/nlp_class2/pretrained.py b/nlp_class2/pretrained.py new file mode 100644 index 00000000..5f616542 --- /dev/null +++ b/nlp_class2/pretrained.py @@ -0,0 +1,113 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +# WHERE TO GET THE VECTORS: +# GloVe: https://nlp.stanford.edu/projects/glove/ + +import numpy as np +from sklearn.metrics.pairwise import pairwise_distances + + +def dist1(a, b): + return np.linalg.norm(a - b) +def dist2(a, b): + return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b)) + +# pick a distance type +dist, metric = dist2, 'cosine' +# dist, metric = dist1, 'euclidean' + + +## more intuitive +# def find_analogies(w1, w2, w3): +# for w in (w1, w2, w3): +# if w not in word2vec: +# print("%s not in dictionary" % w) + +# king = word2vec[w1] +# man = word2vec[w2] +# woman = word2vec[w3] +# v0 = king - man + woman + +# min_dist = float('inf') +# best_word = '' +# for word, v1 in iteritems(word2vec): +# if word not in (w1, w2, w3): +# d = dist(v0, v1) +# if d < min_dist: +# min_dist = d +# best_word = word +# print(w1, "-", w2, "=", best_word, "-", w3) + + +## faster +def find_analogies(w1, w2, w3): + for w in (w1, w2, w3): + if w not in word2vec: + print("%s not in dictionary" % w) + + king = word2vec[w1] + man = word2vec[w2] + woman = word2vec[w3] + v0 = king - man + woman + + distances = pairwise_distances(v0.reshape(1, D), embedding, metric=metric).reshape(V) + idx = distances.argmin() + best_word = idx2word[idx] + + print(w1, "-", w2, "=", best_word, "-", w3) + + +# load in pre-trained word vectors +print('Loading word vectors...') +word2vec = {} +embedding = [] +idx2word = [] +with open('../large_files/glove.6B/glove.6B.50d.txt') as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec + embedding.append(vec) + idx2word.append(word) +print('Found %s word vectors.' 
% len(word2vec)) +embedding = np.array(embedding) +V, D = embedding.shape + + +find_analogies('king', 'man', 'woman') +find_analogies('france', 'paris', 'london') +find_analogies('france', 'paris', 'rome') +find_analogies('paris', 'france', 'italy') +find_analogies('france', 'french', 'english') +find_analogies('japan', 'japanese', 'chinese') +find_analogies('japan', 'japanese', 'italian') +find_analogies('japan', 'japanese', 'australian') +find_analogies('december', 'november', 'june') +find_analogies('miami', 'florida', 'texas') +find_analogies('einstein', 'scientist', 'painter') +find_analogies('china', 'rice', 'bread') +find_analogies('man', 'woman', 'she') +find_analogies('man', 'woman', 'aunt') +find_analogies('man', 'woman', 'sister') +find_analogies('man', 'woman', 'wife') +find_analogies('man', 'woman', 'actress') +find_analogies('man', 'woman', 'mother') +find_analogies('heir', 'heiress', 'princess') +find_analogies('nephew', 'niece', 'aunt') +find_analogies('france', 'paris', 'tokyo') +find_analogies('france', 'paris', 'beijing') +find_analogies('february', 'january', 'november') +find_analogies('france', 'paris', 'rome') +find_analogies('paris', 'france', 'italy') From e294364d156d34311cedc97b05fae7b2c08aefac Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 30 May 2018 04:02:44 -0400 Subject: [PATCH 040/329] speed up --- nlp_class2/pmi.py | 13 ++++++++----- unsupervised_class/gmm.py | 8 +++++++- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/nlp_class2/pmi.py b/nlp_class2/pmi.py index 13b5e1d6..9898063f 100644 --- a/nlp_class2/pmi.py +++ b/nlp_class2/pmi.py @@ -151,7 +151,7 @@ def remove_punctuation_3(s): # PMI(w, c) = #(w, c) / #(w) / p(c) pmi = wc_counts / wc_counts.sum(axis=1) / c_probs print("type(pmi):", type(pmi)) -logX = np.log(pmi.A + 1) #+ np.log(100) +logX = np.log(pmi.A + 1) print("type(logX):", type(logX)) logX[logX < 0] = 0 @@ -180,10 +180,13 @@ def remove_punctuation_3(s): costs.append(cost) # update W - for i in range(V): - matrix = reg*np.eye(D) + U.T.dot(U) - vector = (logX[i,:] - b[i] - c - mu).dot(U) - W[i] = np.linalg.solve(matrix, vector) + # for i in range(V): + # matrix = reg*np.eye(D) + U.T.dot(U) + # vector = (logX[i,:] - b[i] - c - mu).dot(U) + # W[i] = np.linalg.solve(matrix, vector) + matrix = reg*np.eye(D) + U.T.dot(U) + vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).dot(U) + W = np.linalg.solve(matrix, vector) # update b for i in range(V): diff --git a/unsupervised_class/gmm.py b/unsupervised_class/gmm.py index d96f74b7..e2663567 100644 --- a/unsupervised_class/gmm.py +++ b/unsupervised_class/gmm.py @@ -48,7 +48,13 @@ def gmm(X, K, max_iter=20, smoothing=1e-2): Nk = R[:,k].sum() pi[k] = Nk / N M[k] = R[:,k].dot(X) / Nk - C[k] = np.sum(R[n,k]*np.outer(X[n] - M[k], X[n] - M[k]) for n in range(N)) / Nk + np.eye(D)*smoothing + + ## faster + delta = X - M[k] # N x D + Rdelta = np.expand_dims(R[:,k], -1) * delta # multiplies R[:,k] by each col. 
of delta - N x D + C[k] = Rdelta.T.dot(delta) / Nk + np.eye(D)*smoothing # D x D + ## slower + # C[k] = np.sum(R[n,k]*np.outer(X[n] - M[k], X[n] - M[k]) for n in range(N)) / Nk + np.eye(D)*smoothing costs[i] = np.log(weighted_pdfs.sum(axis=1)).sum() From 752f9ec867523b0c0ee6d81da5c33328147101cd Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 1 Jun 2018 20:32:07 -0400 Subject: [PATCH 041/329] extra reading --- nlp_class/extra_reading.txt | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 nlp_class/extra_reading.txt diff --git a/nlp_class/extra_reading.txt b/nlp_class/extra_reading.txt new file mode 100644 index 00000000..3d5dcb21 --- /dev/null +++ b/nlp_class/extra_reading.txt @@ -0,0 +1,23 @@ +LEARNING THE NAIVE BAYES CLASSIFIER WITH OPTIMIZATION MODELS +https://pdfs.semanticscholar.org/059c/36439a84c8d51443022352a94e2751c60d1c.pdf + +RANDOM FORESTS +https://www.stat.berkeley.edu/~breiman/randomforest2001.pdf + +Explaining AdaBoost +http://rob.schapire.net/papers/explaining-adaboost.pdf + +Understanding logistic regression analysis +https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3936971/ + +Indexing by Latent Semantic Analysis +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf + +Language Modeling +http://www.cs.columbia.edu/~mcollins/lm-spring2013.pdf + +Natural Language Processing with Python – Analyzing Text with the Natural Language Toolkit +https://www.nltk.org/book/ + +NLTK Documentation +https://media.readthedocs.org/pdf/nltk/latest/nltk.pdf \ No newline at end of file From de7eb708913c7862f8656c9c3c03526299231d86 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 3 Jun 2018 20:06:52 -0400 Subject: [PATCH 042/329] update comment --- nlp_class2/pos_tf.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nlp_class2/pos_tf.py b/nlp_class2/pos_tf.py index 5370dabc..974453b6 100644 --- a/nlp_class2/pos_tf.py +++ b/nlp_class2/pos_tf.py @@ -154,8 +154,8 @@ def flatten(l): # get the output x = tf.nn.embedding_lookup(tfWe, inputs) -# converts x from a tensor of shape N x T x D -# into a list of length T, where each element is a tensor of shape N x D +# converts x from a tensor of shape N x T x M +# into a list of length T, where each element is a tensor of shape N x M x = tf.unstack(x, sequence_length, 1) # get the rnn output @@ -167,7 +167,7 @@ def flatten(l): outputs = tf.transpose(outputs, (1, 0, 2)) outputs = tf.reshape(outputs, (sequence_length*num_samples, hidden_layer_size)) # NT x M -# Linear activation, using rnn inner loop last output +# final dense layer logits = tf.matmul(outputs, tfWo) + tfbo # NT x K predictions = tf.argmax(logits, 1) predict_op = tf.reshape(predictions, (num_samples, sequence_length)) From 20b522ec9bfa48f96368c778190116e6da3f91d6 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 4 Jun 2018 00:18:21 -0400 Subject: [PATCH 043/329] update --- nlp_class2/pmi.py | 59 +++++++++++++++++++++++++++------------- nlp_class2/pretrained.py | 24 ++++++++++++++++ 2 files changed, 64 insertions(+), 19 deletions(-) diff --git a/nlp_class2/pmi.py b/nlp_class2/pmi.py index 9898063f..c777c21f 100644 --- a/nlp_class2/pmi.py +++ b/nlp_class2/pmi.py @@ -16,6 +16,7 @@ from scipy.spatial.distance import cosine as cos_dist from sklearn.metrics.pairwise import pairwise_distances from glob import glob +from datetime import datetime # input files @@ -173,36 +174,56 @@ def remove_punctuation_3(s): costs = [] +t0 = datetime.now() for epoch in range(10): print("epoch:", epoch) delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX cost = ( 
delta * delta ).sum() costs.append(cost) + ### partially vectorized updates ### # update W + # matrix = reg*np.eye(D) + U.T.dot(U) # for i in range(V): - # matrix = reg*np.eye(D) + U.T.dot(U) # vector = (logX[i,:] - b[i] - c - mu).dot(U) # W[i] = np.linalg.solve(matrix, vector) + + # # update b + # for i in range(V): + # numerator = (logX[i,:] - W[i].dot(U.T) - c - mu).sum() + # b[i] = numerator / V #/ (1 + reg) + + # # update U + # matrix = reg*np.eye(D) + W.T.dot(W) + # for j in range(V): + # vector = (logX[:,j] - b - c[j] - mu).dot(W) + # U[j] = np.linalg.solve(matrix, vector) + + # # update c + # for j in range(V): + # numerator = (logX[:,j] - W.dot(U[j]) - b - mu).sum() + # c[j] = numerator / V #/ (1 + reg) + + + ### vectorized updates ### + # vectorized update W matrix = reg*np.eye(D) + U.T.dot(U) - vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).dot(U) - W = np.linalg.solve(matrix, vector) - - # update b - for i in range(V): - numerator = (logX[i,:] - W[i].dot(U.T) - c - mu).sum() - b[i] = numerator / V #/ (1 + reg) - - # update U - for j in range(V): - matrix = reg*np.eye(D) + W.T.dot(W) - vector = (logX[:,j] - b - c[j] - mu).dot(W) - U[j] = np.linalg.solve(matrix, vector) - - # update c - for j in range(V): - numerator = (logX[:,j] - W.dot(U[j]) - b - mu).sum() - c[j] = numerator / V #/ (1 + reg) + vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).dot(U).T + W = np.linalg.solve(matrix, vector).T + + # vectorized update b + b = (logX - W.dot(U.T) - c.reshape(1, V) - mu).sum(axis=1) / V + + # vectorized update U + matrix = reg*np.eye(D) + W.T.dot(W) + vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).T.dot(W).T + U = np.linalg.solve(matrix, vector).T + + # vectorized update c + c = (logX - W.dot(U.T) - b.reshape(V, 1) - mu).sum(axis=0) / V + + +print("train duration:", datetime.now() - t0) plt.plot(costs) plt.show() diff --git a/nlp_class2/pretrained.py b/nlp_class2/pretrained.py index 5f616542..f06a07df 100644 --- a/nlp_class2/pretrained.py +++ b/nlp_class2/pretrained.py @@ -53,6 +53,7 @@ def find_analogies(w1, w2, w3): for w in (w1, w2, w3): if w not in word2vec: print("%s not in dictionary" % w) + return king = word2vec[w1] man = word2vec[w2] @@ -66,6 +67,20 @@ def find_analogies(w1, w2, w3): print(w1, "-", w2, "=", best_word, "-", w3) +def nearest_neighbors(w, n=5): + if w not in word2vec: + print("%s not in dictionary:" % w) + return + + v = word2vec[w] + distances = pairwise_distances(v.reshape(1, D), embedding, metric=metric).reshape(V) + idxs = distances.argsort()[1:n+1] + print("neighbors of: %s" % w) + for idx in idxs: + print("\t%s" % idx2word[idx]) + + + # load in pre-trained word vectors print('Loading word vectors...') word2vec = {} @@ -111,3 +126,12 @@ def find_analogies(w1, w2, w3): find_analogies('february', 'january', 'november') find_analogies('france', 'paris', 'rome') find_analogies('paris', 'france', 'italy') + +nearest_neighbors('king') +nearest_neighbors('france') +nearest_neighbors('japan') +nearest_neighbors('einstein') +nearest_neighbors('woman') +nearest_neighbors('nephew') +nearest_neighbors('february') +nearest_neighbors('rome') From dc141ddf01127d0ead46cfc22add7e1fbb8b2adf Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 5 Jun 2018 14:43:25 -0400 Subject: [PATCH 044/329] rntn tf --- nlp_class2/rntn_tensorflow_rnn.py | 337 ++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) create mode 100644 nlp_class2/rntn_tensorflow_rnn.py diff --git a/nlp_class2/rntn_tensorflow_rnn.py b/nlp_class2/rntn_tensorflow_rnn.py new file mode 
100644 index 00000000..8cd830c2 --- /dev/null +++ b/nlp_class2/rntn_tensorflow_rnn.py @@ -0,0 +1,337 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import sys +import numpy as np +import matplotlib.pyplot as plt +import tensorflow as tf + +from sklearn.utils import shuffle +from util import init_weight, get_ptb_data, display_tree +from datetime import datetime +from sklearn.metrics import f1_score + + + +class RecursiveNN: + def __init__(self, V, D, K, activation=tf.tanh): + self.V = V + self.D = D + self.K = K + self.f = activation + + def fit(self, trees, test_trees, reg=1e-3, epochs=8, train_inner_nodes=False): + D = self.D + V = self.V + K = self.K + N = len(trees) + + We = init_weight(V, D) + W11 = np.random.randn(D, D, D) / np.sqrt(3*D) + W22 = np.random.randn(D, D, D) / np.sqrt(3*D) + W12 = np.random.randn(D, D, D) / np.sqrt(3*D) + W1 = init_weight(D, D) + W2 = init_weight(D, D) + bh = np.zeros(D) + Wo = init_weight(D, K) + bo = np.zeros(K) + + self.We = tf.Variable(We.astype(np.float32)) + self.W11 = tf.Variable(W11.astype(np.float32)) + self.W22 = tf.Variable(W22.astype(np.float32)) + self.W12 = tf.Variable(W12.astype(np.float32)) + self.W1 = tf.Variable(W1.astype(np.float32)) + self.W2 = tf.Variable(W2.astype(np.float32)) + self.bh = tf.Variable(bh.astype(np.float32)) + self.Wo = tf.Variable(Wo.astype(np.float32)) + self.bo = tf.Variable(bo.astype(np.float32)) + self.weights = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.Wo] + + + words = tf.placeholder(tf.int32, shape=(None,), name='words') + left_children = tf.placeholder(tf.int32, shape=(None,), name='left_children') + right_children = tf.placeholder(tf.int32, shape=(None,), name='right_children') + labels = tf.placeholder(tf.int32, shape=(None,), name='labels') + + # save for later + self.words = words + self.left = left_children + self.right = right_children + self.labels = labels + + def dot1(a, B): + return tf.tensordot(a, B, axes=[[0], [1]]) + + def dot2(B, a): + return tf.tensordot(B, a, axes=[[1], [0]]) + + def recursive_net_transform(hiddens, n): + h_left = hiddens.read(left_children[n]) + h_right = hiddens.read(right_children[n]) + return self.f( + dot1(h_left, dot2(self.W11, h_left)) + + dot1(h_right, dot2(self.W22, h_right)) + + dot1(h_left, dot2(self.W12, h_right)) + + dot1(h_left, self.W1) + + dot1(h_right, self.W2) + + self.bh + ) + + + def recurrence(hiddens, n): + w = words[n] + # any non-word will have index -1 + + h_n = tf.cond( + w >= 0, + lambda: tf.nn.embedding_lookup(self.We, w), + lambda: recursive_net_transform(hiddens, n) + ) + hiddens = hiddens.write(n, h_n) + n = tf.add(n, 1) + return hiddens, n + + + def condition(hiddens, n): + # loop should continue while n < len(words) + return tf.less(n, tf.shape(words)[0]) + + + hiddens = tf.TensorArray( + tf.float32, + size=0, + dynamic_size=True, + clear_after_read=False, + infer_shape=False + ) + + hiddens, _ = tf.while_loop( + condition, + recurrence, + [hiddens, tf.constant(0)], + parallel_iterations=1 + ) + h = hiddens.stack() + logits = tf.matmul(h, self.Wo) + self.bo + + prediction_op = tf.argmax(logits, axis=1) + self.prediction_op = prediction_op + + rcost = reg*sum(tf.nn.l2_loss(p) for p in self.weights) + if train_inner_nodes: 
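+            # note: tree2list() below marks unlabeled inner nodes with label -1,
+            # so tf.where(labels >= 0) keeps only the indices of labeled nodes and
+            # tf.gather restricts logits and labels to those rows before the
+            # softmax cross-entropy is computed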
+ # filter out -1s + labeled_indices = tf.where(labels >= 0) + + cost_op = tf.reduce_mean( + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.gather(logits, labeled_indices), + labels=tf.gather(labels, labeled_indices), + ) + ) + rcost + else: + cost_op = tf.reduce_mean( + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits[-1], + labels=labels[-1], + ) + ) + rcost + + train_op = tf.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op) + # train_op = tf.train.MomentumOptimizer(learning_rate=8e-3, momentum=0.9).minimize(cost_op) + + # NOTE: If you're using GPU, InteractiveSession breaks + # AdagradOptimizer and some other optimizers + # change to tf.Session() if so. + self.session = tf.InteractiveSession() + init_op = tf.global_variables_initializer() + self.session.run(init_op) + + + costs = [] + sequence_indexes = range(N) + for i in range(epochs): + t0 = datetime.now() + sequence_indexes = shuffle(sequence_indexes) + n_correct = 0 + n_total = 0 + cost = 0 + it = 0 + for j in sequence_indexes: + words_, left, right, lab = trees[j] + # print("words_:", words_) + # print("lab:", lab) + c, p, _ = self.session.run( + (cost_op, prediction_op, train_op), + feed_dict={ + words: words_, + left_children: left, + right_children: right, + labels: lab + } + ) + if np.isnan(c): + print("Cost is nan! Let's stop here. \ + Why don't you try decreasing the learning rate?") + for p in self.params: + print(p.get_value().sum()) + exit() + cost += c + n_correct += (p[-1] == lab[-1]) + n_total += 1 + + it += 1 + if it % 10 == 0: + sys.stdout.write( + "j/N: %d/%d correct rate so far: %f, cost so far: %f\r" % + (it, N, float(n_correct)/n_total, cost) + ) + sys.stdout.flush() + + + # calculate the test score + n_test_correct = 0 + n_test_total = 0 + for words_, left, right, lab in test_trees: + p = self.session.run(prediction_op, feed_dict={ + words: words_, + left_children: left, + right_children: right, + labels: lab + }) + n_test_correct += (p[-1] == lab[-1]) + n_test_total += 1 + + + print( + "i:", i, "cost:", cost, + "train acc:", float(n_correct)/n_total, + "test acc:", float(n_test_correct)/n_test_total, + "time for epoch:", (datetime.now() - t0) + ) + costs.append(cost) + + plt.plot(costs) + plt.show() + + def predict(self, words, left, right, lab): + return self.session.run( + self.prediction_op, + feed_dict={ + self.words: words, + self.left: left, + self.right: right, + self.labels: lab + } + ) + + + def score(self, trees): + n_total = len(trees) + n_correct = 0 + for words, left, right, lab in trees: + p = self.predict(words, left, right, lab) + n_correct += (p[-1] == lab[-1]) + return float(n_correct) / n_total + + def f1_score(self, trees): + Y = [] + P = [] + for words, left, right, lab in trees: + p = self.predict(words, left, right, lab) + Y.append(lab[-1]) + P.append(p[-1]) + return f1_score(Y, P, average=None).mean() + + +def add_idx_to_tree(tree, current_idx): + # post-order labeling of tree nodes + if tree is None: + return current_idx + current_idx = add_idx_to_tree(tree.left, current_idx) + current_idx = add_idx_to_tree(tree.right, current_idx) + tree.idx = current_idx + current_idx += 1 + return current_idx + + +def tree2list(tree, parent_idx, is_binary=False): + if tree is None: + return [], [], [], [] + + words_left, left_child_left, right_child_left, labels_left = tree2list(tree.left, tree.idx, is_binary) + words_right, left_child_right, right_child_right, labels_right = tree2list(tree.right, tree.idx, is_binary) + + if tree.word is None: + w = -1 + left = 
tree.left.idx + right = tree.right.idx + else: + w = tree.word + left = -1 + right = -1 + + words = words_left + words_right + [w] + left_child = left_child_left + left_child_right + [left] + right_child = right_child_left + right_child_right + [right] + + if is_binary: + if tree.label > 2: + label = 1 + elif tree.label < 2: + label = 0 + else: + label = -1 # we will eventually filter these out + else: + label = tree.label + labels = labels_left + labels_right + [label] + + return words, left_child, right_child, labels + + +def main(is_binary=True): + train, test, word2idx = get_ptb_data() + + for t in train: + add_idx_to_tree(t, 0) + train = [tree2list(t, -1, is_binary) for t in train] + if is_binary: + train = [t for t in train if t[3][-1] >= 0] # for filtering binary labels + + for t in test: + add_idx_to_tree(t, 0) + test = [tree2list(t, -1, is_binary) for t in test] + if is_binary: + test = [t for t in test if t[3][-1] >= 0] # for filtering binary labels + + + + train = shuffle(train) + # train = train[:5000] + # n_pos = sum(t[3][-1] for t in train) + # print("n_pos train:", n_pos) + test = shuffle(test) + smalltest = test[:1000] + # n_pos = sum(t[3][-1] for t in test) + # print("n_pos test:", n_pos) + + V = len(word2idx) + print("vocab size:", V) + D = 10 + K = 2 if is_binary else 5 + + model = RecursiveNN(V, D, K) + model.fit(train, smalltest, reg=1e-3, epochs=20, train_inner_nodes=True) + print("train accuracy:", model.score(train)) + print("test accuracy:", model.score(test)) + print("train f1:", model.f1_score(train)) + print("test f1:", model.f1_score(test)) + + +if __name__ == '__main__': + main() From 9d98f74463d707afd352a501823787149e55f98c Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 6 Jun 2018 23:10:44 -0400 Subject: [PATCH 045/329] add another --- nlp_class3/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nlp_class3/extra_reading.txt b/nlp_class3/extra_reading.txt index b4d23a0c..bc94c5e6 100644 --- a/nlp_class3/extra_reading.txt +++ b/nlp_class3/extra_reading.txt @@ -41,4 +41,7 @@ https://research.googleblog.com/2017/12/tacotron-2-generating-human-like-speech. An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling https://arxiv.org/abs/1803.01271 -(just released March 2018!) \ No newline at end of file +(just released March 2018!) 
+ +Relational recurrent neural networks +https://arxiv.org/abs/1806.01822 From f725100807a09accb92e36aa41bfb15d50473dd3 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Thu, 7 Jun 2018 19:53:01 -0400 Subject: [PATCH 046/329] w2v tf --- nlp_class2/word2vec_tf.py | 491 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 491 insertions(+) create mode 100644 nlp_class2/word2vec_tf.py diff --git a/nlp_class2/word2vec_tf.py b/nlp_class2/word2vec_tf.py new file mode 100644 index 00000000..a6aa70e0 --- /dev/null +++ b/nlp_class2/word2vec_tf.py @@ -0,0 +1,491 @@ +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import json +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt +from scipy.special import expit as sigmoid +from sklearn.utils import shuffle +from datetime import datetime +# from util import find_analogies + +from scipy.spatial.distance import cosine as cos_dist +from sklearn.metrics.pairwise import pairwise_distances + + +from glob import glob + +import os +import sys +import string + + + +# unfortunately these work different ways +def remove_punctuation_2(s): + return s.translate(None, string.punctuation) + +def remove_punctuation_3(s): + return s.translate(str.maketrans('','',string.punctuation)) + +if sys.version.startswith('2'): + remove_punctuation = remove_punctuation_2 +else: + remove_punctuation = remove_punctuation_3 + + +def download_text8(dst): + pass + + +def get_text8(): + # download the data if it is not yet in the right place + path = '../large_files/text8' + if not os.path.exists(path): + download_text8(path) + + words = open(path).read() + word2idx = {} + sents = [[]] + count = 0 + for word in words.split(): + if word not in word2idx: + word2idx[word] = count + count += 1 + sents[0].append(word2idx[word]) + print("count:", count) + return sents, word2idx + + +def get_wiki(): + V = 20000 + files = glob('../large_files/enwiki*.txt') + all_word_counts = {} + for f in files: + for line in open(f): + if line and line[0] not in '[*-|=\{\}': + s = remove_punctuation(line).lower().split() + if len(s) > 1: + for word in s: + if word not in all_word_counts: + all_word_counts[word] = 0 + all_word_counts[word] += 1 + print("finished counting") + + V = min(V, len(all_word_counts)) + all_word_counts = sorted(all_word_counts.items(), key=lambda x: x[1], reverse=True) + + top_words = [w for w, count in all_word_counts[:V-1]] + [''] + word2idx = {w:i for i, w in enumerate(top_words)} + unk = word2idx[''] + + sents = [] + for f in files: + for line in open(f): + if line and line[0] not in '[*-|=\{\}': + s = remove_punctuation(line).lower().split() + if len(s) > 1: + # if a word is not nearby another word, there won't be any context! + # and hence nothing to train! 
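+                    # e.g. (hypothetical) s = ['the', 'cromulent', 'cat'], where
+                    # 'cromulent' falls outside the top-V vocab: the lookup below
+                    # yields [word2idx['the'], unk, word2idx['cat']]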
+ sent = [word2idx[w] if w in word2idx else unk for w in s] + sents.append(sent) + return sents, word2idx + + + + +def train_model(savedir): + # get the data + sentences, word2idx = get_wiki() #get_text8() + + + # number of unique words + vocab_size = len(word2idx) + + + # config + window_size = 10 + learning_rate = 0.025 + final_learning_rate = 0.0001 + num_negatives = 5 # number of negative samples to draw per input word + samples_per_epoch = int(1e5) + epochs = 25 + D = 50 # word embedding size + + # learning rate decay + learning_rate_delta = (learning_rate - final_learning_rate) / epochs + + # distribution for drawing negative samples + p_neg = get_negative_sampling_distribution(sentences) + + + # params + W = np.random.randn(vocab_size, D).astype(np.float32) # input-to-hidden + V = np.random.randn(D, vocab_size).astype(np.float32) # hidden-to-output + + + # create the model + tf_input = tf.placeholder(tf.int32, shape=(None,)) + tf_negword = tf.placeholder(tf.int32, shape=(None,)) + tf_context = tf.placeholder(tf.int32, shape=(None,)) # targets (context) + tfW = tf.Variable(W) + tfV = tf.Variable(V.T) + # biases = tf.Variable(np.zeros(vocab_size, dtype=np.float32)) + + def dot(A, B): + C = A * B + return tf.reduce_sum(C, axis=1) + + # correct middle word output + emb_input = tf.nn.embedding_lookup(tfW, tf_input) # 1 x D + emb_output = tf.nn.embedding_lookup(tfV, tf_context) # N x D + correct_output = dot(emb_input, emb_output) # N + # emb_input = tf.transpose(emb_input, (1, 0)) + # correct_output = tf.matmul(emb_output, emb_input) + pos_loss = tf.nn.sigmoid_cross_entropy_with_logits( + labels=tf.ones(tf.shape(correct_output)), logits=correct_output) + + # incorrect middle word output + emb_input = tf.nn.embedding_lookup(tfW, tf_negword) + incorrect_output = dot(emb_input, emb_output) + # emb_input = tf.transpose(emb_input, (1, 0)) + # incorrect_output = tf.matmul(emb_output, emb_input) + neg_loss = tf.nn.sigmoid_cross_entropy_with_logits( + labels=tf.zeros(tf.shape(incorrect_output)), logits=incorrect_output) + + # total loss + loss = tf.reduce_mean(pos_loss) + tf.reduce_mean(neg_loss) + + # output = hidden.dot(tfV) + + # loss + # neither of the built-in TF functions work well + # per_sample_loss = tf.nn.nce_loss( + # # per_sample_loss = tf.nn.sampled_softmax_loss( + # weights=tfV, + # biases=biases, + # labels=tfY, + # inputs=hidden, + # num_sampled=num_negatives, + # num_classes=vocab_size, + # ) + # loss = tf.reduce_mean(per_sample_loss) + + # optimizer + # train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) + train_op = tf.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss) + # train_op = tf.train.AdamOptimizer(1e-2).minimize(loss) + + # make session + session = tf.Session() + init_op = tf.global_variables_initializer() + session.run(init_op) + + + # save the costs to plot them per iteration + costs = [] + + + # number of total words in corpus + total_words = sum(len(sentence) for sentence in sentences) + print("total number of words in corpus:", total_words) + + + # train the model + for epoch in range(epochs): + # randomly order sentences so we don't always see + # sentences in the same order + np.random.shuffle(sentences) + + # accumulate the cost + cost = 0 + counter = 0 + inputs = [] + targets = [] + negwords = [] + for sentence in sentences: + # randomly order words so we don't always see + # samples in the same order + # randomly_ordered_positions = [pos for pos in range(len(sentence)) \ + # if p_neg[sentence[pos]] > np.random.random()] + 
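+            # a random subset of word positions is drawn below (random size,
+            # without replacement, in random order); frequent words are then
+            # dropped with probability p_drop = 1 - sqrt(threshold / p_neg[w]),
+            # a variant of the word2vec subsampling heuristic applied to the
+            # negative-sampling distribution rather than the raw unigram frequency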
randomly_ordered_positions = np.random.choice( + len(sentence), + size=np.random.randint(1, len(sentence) + 1), #samples_per_epoch, + replace=False, + ) + + + # keep only certain words based on p_neg + threshold = 1e-5 + p_drop = 1 - np.sqrt(threshold / p_neg) + randomly_ordered_positions = [i for i in randomly_ordered_positions \ + if np.random.random() < (1 - p_drop[sentence[i]]) + ] + # print("Reduced sentence size from %s to %s" % (len(sentence), len(randomly_ordered_positions))) + if len(randomly_ordered_positions) == 0: + continue + + # init + # TODO: don't need to randomly order positions + # since we'll do the whole sentence at once + # move call to train op outside the loop + + for j, pos in enumerate(randomly_ordered_positions): + # the middle word + word = sentence[pos] + + # get the positive context words/negative samples + context_words = get_context(pos, sentence, window_size) + neg_word = np.random.choice(vocab_size, p=p_neg) + + # combine them so we can loop over them all at once + # also shuffle, so we don't do all +ve then all-ve + # words_and_labels = join_samples(context_words, negative_samples) + # targets_ = np.array(context_words) + + n = len(context_words) + inputs += [word]*n + negwords += [neg_word]*n + # targets = np.concatenate([targets, targets_]) + targets += context_words + + # _, c = session.run( + # (train_op, loss), + # feed_dict={ + # tf_input: [word], + # tf_negword: [neg_word], + # tf_context: targets_, + # } + # ) + # cost += c + + + if len(inputs) >= 128: + _, c = session.run( + (train_op, loss), + feed_dict={ + tf_input: inputs, + tf_negword: negwords, + tf_context: targets, + } + ) + cost += c + + # reset + inputs = [] + targets = [] + negwords = [] + + counter += 1 + if counter % 100 == 0: + sys.stdout.write("processed %s / %s\r" % (counter, len(sentences))) + sys.stdout.flush() + # break + + + # print stuff so we don't stare at a blank screen + print("epoch complete:", epoch) + + # save the cost + costs.append(cost) + + # update the learning rate + learning_rate -= learning_rate_delta + + + # plot the cost per iteration + plt.plot(costs) + plt.show() + + # get the params + W, VT = session.run((tfW, tfV)) + V = VT.T + + # save the model + if not os.path.exists(savedir): + os.mkdir(savedir) + + with open('%s/word2idx.json' % savedir, 'w') as f: + json.dump(word2idx, f) + + np.savez('%s/weights.npz' % savedir, W, V) + + # return the model + return word2idx, W, V + + +def get_negative_sampling_distribution(sentences): + # Pn(w) = prob of word occuring + # we would like to sample the negative samples + # such that words that occur more often + # should be sampled more often + + word_freq = {} + word_count = sum(len(sentence) for sentence in sentences) + for sentence in sentences: + for word in sentence: + if word not in word_freq: + word_freq[word] = 0 + word_freq[word] += 1 + + # vocab size + V = len(word_freq) + + p_neg = np.zeros(V) + for j in range(V): + p_neg[j] = (word_freq[j] / float(V))**0.75 + + # normalize it + p_neg = p_neg / p_neg.sum() + + assert(np.all(p_neg > 0)) + return p_neg + + +def get_context(pos, sentence, window_size): + # input: + # a sentence of the form: x x x x c c c pos c c c x x x x + # output: + # the context word indices: c c c c c c + + start = max(0, pos - window_size) + end_ = min(len(sentence), pos + window_size) + + context = [] + for ctx_pos, ctx_word_idx in enumerate(sentence[start:end_], start=start): + if ctx_pos != pos: + # don't include the input word itself as a target + context.append(ctx_word_idx) + return 
context + + +def get_negative_samples(context, num_negatives, p_neg): + # randomly select some words not in the context + + # first copy p_neg so we can modify it by + # setting the sentence's word's probabilities to 0 + p_neg = p_neg.copy() + + for word in context: + p_neg[word] = 0 + + # re-normalize it so it remains a valid distribution + p_neg = p_neg / p_neg.sum() + + # draw the samples + neg_samples = np.random.choice( + len(p_neg), # vocab size + size=num_negatives, + replace=False, + p=p_neg, + ) + return neg_samples + + +def join_samples(context_words, negative_samples): + # we want to return a list of tuples of: + # word -> label + words_and_labels = [(w, 1) for w in context_words] + \ + [(w, 0) for w in negative_samples] + np.random.shuffle(words_and_labels) + return words_and_labels + + + +def load_model(savedir): + with open('%s/word2idx.json' % savedir) as f: + word2idx = json.load(f) + npz = np.load('%s/weights.npz' % savedir) + W = npz['arr_0'] + V = npz['arr_1'] + return word2idx, W, V + + + +def analogy(pos1, neg1, pos2, neg2, word2idx, idx2word, W): + V, D = W.shape + + # don't actually use pos2 in calculation, just print what's expected + print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2)) + for w in (pos1, neg1, pos2, neg2): + if w not in word2idx: + print("Sorry, %s not in word2idx" % w) + return + + p1 = W[word2idx[pos1]] + n1 = W[word2idx[neg1]] + p2 = W[word2idx[pos2]] + n2 = W[word2idx[neg2]] + + vec = p1 - n1 + n2 + + distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V) + idx = distances.argsort()[:10] + + # pick one that's not p1, n1, or n2 + best_idx = -1 + keep_out = [word2idx[w] for w in (pos1, neg1, neg2)] + for i in idx: + if i not in keep_out: + best_idx = i + break + + print("got: %s - %s = %s - %s" % (pos1, neg1, idx2word[idx[0]], neg2)) + print("closest 10:") + for i in idx: + print(idx2word[i], distances[i]) + + print("dist to %s:" % pos2, cos_dist(p2, vec)) + + +def test_model(word2idx, W, V): + # there are multiple ways to get the "final" word embedding + # We = (W + V.T) / 2 + # We = W + + idx2word = {i:w for w, i in word2idx.items()} + + for We in (W, (W + V.T) / 2): + print("**********") + + analogy('king', 'man', 'queen', 'woman', word2idx, idx2word, We) + analogy('king', 'prince', 'queen', 'princess', word2idx, idx2word, We) + analogy('miami', 'florida', 'dallas', 'texas', word2idx, idx2word, We) + analogy('einstein', 'scientist', 'picasso', 'painter', word2idx, idx2word, We) + analogy('japan', 'sushi', 'england', 'bread', word2idx, idx2word, We) + analogy('man', 'woman', 'he', 'she', word2idx, idx2word, We) + analogy('man', 'woman', 'uncle', 'aunt', word2idx, idx2word, We) + analogy('man', 'woman', 'brother', 'sister', word2idx, idx2word, We) + analogy('man', 'woman', 'husband', 'wife', word2idx, idx2word, We) + analogy('man', 'woman', 'actor', 'actress', word2idx, idx2word, We) + analogy('man', 'woman', 'father', 'mother', word2idx, idx2word, We) + analogy('heir', 'heiress', 'prince', 'princess', word2idx, idx2word, We) + analogy('nephew', 'niece', 'uncle', 'aunt', word2idx, idx2word, We) + analogy('france', 'paris', 'japan', 'tokyo', word2idx, idx2word, We) + analogy('france', 'paris', 'china', 'beijing', word2idx, idx2word, We) + analogy('february', 'january', 'december', 'november', word2idx, idx2word, We) + analogy('france', 'paris', 'germany', 'berlin', word2idx, idx2word, We) + analogy('week', 'day', 'year', 'month', word2idx, idx2word, We) + analogy('week', 'day', 'hour', 'minute', word2idx, 
idx2word, We) + analogy('france', 'paris', 'italy', 'rome', word2idx, idx2word, We) + analogy('paris', 'france', 'rome', 'italy', word2idx, idx2word, We) + analogy('france', 'french', 'england', 'english', word2idx, idx2word, We) + analogy('japan', 'japanese', 'china', 'chinese', word2idx, idx2word, We) + analogy('china', 'chinese', 'america', 'american', word2idx, idx2word, We) + analogy('japan', 'japanese', 'italy', 'italian', word2idx, idx2word, We) + analogy('japan', 'japanese', 'australia', 'australian', word2idx, idx2word, We) + analogy('walk', 'walking', 'swim', 'swimming', word2idx, idx2word, We) + + + +if __name__ == '__main__': + word2idx, W, V = train_model('w2v_tf') + # word2idx, W, V = load_model('w2v_tf') + test_model(word2idx, W, V) + From 9a06c87289dc21620aac70c78784891492a5928d Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 12 Jun 2018 14:58:52 -0400 Subject: [PATCH 047/329] update names --- rl2/mountaincar/pg_tf_random.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/rl2/mountaincar/pg_tf_random.py b/rl2/mountaincar/pg_tf_random.py index 430281a0..bb0d2a11 100644 --- a/rl2/mountaincar/pg_tf_random.py +++ b/rl2/mountaincar/pg_tf_random.py @@ -94,9 +94,10 @@ def get_output(layers): # calculate output and cost mean = get_output(self.mean_layers) - var = get_output(self.var_layers) + 1e-4 # smoothing + std = get_output(self.var_layers) + 1e-4 # smoothing - norm = tf.contrib.distributions.Normal(mean, var) + # note: the 'variance' is actually standard deviation + norm = tf.contrib.distributions.Normal(mean, std) self.predict_op = tf.clip_by_value(norm.sample(), -1, 1) From 1a757aa0d0ff18657bf03ee10cc90045810b2e86 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 14 Jun 2018 23:54:53 -0400 Subject: [PATCH 048/329] rename --- nlp_class2/{pretrained.py => pretrained_glove.py} | 1 + 1 file changed, 1 insertion(+) rename nlp_class2/{pretrained.py => pretrained_glove.py} (98%) diff --git a/nlp_class2/pretrained.py b/nlp_class2/pretrained_glove.py similarity index 98% rename from nlp_class2/pretrained.py rename to nlp_class2/pretrained_glove.py index f06a07df..ebae2f8a 100644 --- a/nlp_class2/pretrained.py +++ b/nlp_class2/pretrained_glove.py @@ -11,6 +11,7 @@ # WHERE TO GET THE VECTORS: # GloVe: https://nlp.stanford.edu/projects/glove/ +# Direct link: http://nlp.stanford.edu/data/glove.6B.zip import numpy as np from sklearn.metrics.pairwise import pairwise_distances From 2eb9ff47beb85723ed727f23e576adcbd1ba3d57 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 16 Jun 2018 13:51:04 -0400 Subject: [PATCH 049/329] update --- nlp_class2/tfidf_tsne.py | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/nlp_class2/tfidf_tsne.py b/nlp_class2/tfidf_tsne.py index 7861aba5..bceb652c 100644 --- a/nlp_class2/tfidf_tsne.py +++ b/nlp_class2/tfidf_tsne.py @@ -13,6 +13,7 @@ import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.manifold import TSNE +from sklearn.decomposition import TruncatedSVD, PCA, KernelPCA from datetime import datetime import os @@ -33,8 +34,9 @@ def main(): ('paris', 'france', 'italy'), ) + ### choose a data source ### # sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=1500) - sentences, word2idx = get_wikipedia_data(n_files=20, n_vocab=2000, by_paragraph=True) + sentences, word2idx = get_wikipedia_data(n_files=3, n_vocab=2000, by_paragraph=True) # with open('tfidf_word2idx.json', 'w') as f: # json.dump(word2idx, f) @@ -63,11 +65,12 @@ def main(): print("finished getting 
raw counts") transformer = TfidfTransformer() - A = transformer.fit_transform(A) - # print("type(A):", type(A)) - # exit() + A = transformer.fit_transform(A.T).T + + # tsne requires a dense array A = A.toarray() + # map back to word in plot idx2word = {v:k for k, v in iteritems(word2idx)} # plot the data in 2-D @@ -81,14 +84,21 @@ def main(): print("bad string:", idx2word[i]) plt.draw() - # create a higher-D word embedding, try word analogies - # tsne = TSNE(n_components=3) - # We = tsne.fit_transform(A) - We = Z + ### multiple ways to create vectors for each word ### + # 1) simply set it to the TF-IDF matrix + # We = A + + # 2) create a higher-D word embedding + tsne = TSNE(n_components=3) + We = tsne.fit_transform(A) + + # 3) use a classic dimensionality reduction technique + # svd = KernelPCA(n_components=20, kernel='rbf') + # We = svd.fit_transform(A) for word_list in analogies_to_try: w1, w2, w3 = word_list - find_analogies(w1, w2, w3, We, word2idx) + find_analogies(w1, w2, w3, We, word2idx, idx2word) plt.show() # pause script until plot is closed From a55f547901ed0a331bf72121039f278c571b0e11 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 16 Jun 2018 13:51:56 -0400 Subject: [PATCH 050/329] update --- nlp_class2/util.py | 55 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/nlp_class2/util.py b/nlp_class2/util.py index 45df0d42..5de1b4d1 100644 --- a/nlp_class2/util.py +++ b/nlp_class2/util.py @@ -10,33 +10,54 @@ import os import numpy as np +from sklearn.metrics.pairwise import pairwise_distances + + def init_weight(Mi, Mo): return np.random.randn(Mi, Mo) / np.sqrt(Mi + Mo) -def find_analogies(w1, w2, w3, We, word2idx): +# slow version +# def find_analogies(w1, w2, w3, We, word2idx): +# king = We[word2idx[w1]] +# man = We[word2idx[w2]] +# woman = We[word2idx[w3]] +# v0 = king - man + woman + +# def dist1(a, b): +# return np.linalg.norm(a - b) +# def dist2(a, b): +# return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b)) + +# for dist, name in [(dist1, 'Euclidean'), (dist2, 'cosine')]: +# min_dist = float('inf') +# best_word = '' +# for word, idx in iteritems(word2idx): +# if word not in (w1, w2, w3): +# v1 = We[idx] +# d = dist(v0, v1) +# if d < min_dist: +# min_dist = d +# best_word = word +# print("closest match by", name, "distance:", best_word) +# print(w1, "-", w2, "=", best_word, "-", w3) + +# fast version +def find_analogies(w1, w2, w3, We, word2idx, idx2word): + V, D = We.shape + king = We[word2idx[w1]] man = We[word2idx[w2]] woman = We[word2idx[w3]] v0 = king - man + woman - def dist1(a, b): - return np.linalg.norm(a - b) - def dist2(a, b): - return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b)) - - for dist, name in [(dist1, 'Euclidean'), (dist2, 'cosine')]: - min_dist = float('inf') - best_word = '' - for word, idx in iteritems(word2idx): - if word not in (w1, w2, w3): - v1 = We[idx] - d = dist(v0, v1) - if d < min_dist: - min_dist = d - best_word = word - print("closest match by", name, "distance:", best_word) + for dist in ('euclidean', 'cosine'): + distances = pairwise_distances(v0.reshape(1, D), We, metric=dist).reshape(V) + idx = distances.argmin() + best_word = idx2word[idx] + + print("closest match by", dist, "distance:", best_word) print(w1, "-", w2, "=", best_word, "-", w3) From cbad6ebbce1e757c6e824c6c045b07adf8e5bba0 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 21 Jun 2018 14:33:04 -0400 Subject: [PATCH 051/329] add multinomial nb --- supervised_class/multinomialnb.py | 58 
+++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 supervised_class/multinomialnb.py diff --git a/supervised_class/multinomialnb.py b/supervised_class/multinomialnb.py new file mode 100644 index 00000000..a24155e6 --- /dev/null +++ b/supervised_class/multinomialnb.py @@ -0,0 +1,58 @@ +# https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-python +# https://www.udemy.com/data-science-supervised-machine-learning-in-python +# This is an example of a Naive Bayes classifier on MNIST data. +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from util import get_data +from datetime import datetime + +class MultinomialNB(object): + def fit(self, X, Y, smoothing=1.0): + # one-hot encode Y + K = len(set(Y)) # number of classes + N = len(Y) # number of samples + labels = Y + Y = np.zeros((N, K)) + Y[np.arange(N), labels] = 1 + + # D x K matrix of feature counts + # feature_counts[d,k] = count of feature d in class k + feature_counts = X.T.dot(Y) + smoothing + class_counts = Y.sum(axis=0) + + self.weights = np.log(feature_counts) - np.log(feature_counts.sum(axis=0)) + self.priors = np.log(class_counts) - np.log(class_counts.sum()) + + def score(self, X, Y): + P = self.predict(X) + return np.mean(P == Y) + + def predict(self, X): + P = X.dot(self.weights) + self.priors + return np.argmax(P, axis=1) + + +if __name__ == '__main__': + X, Y = get_data(10000) + Ntrain = len(Y) // 2 + Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain] + Xtest, Ytest = X[Ntrain:], Y[Ntrain:] + + model = MultinomialNB() + t0 = datetime.now() + model.fit(Xtrain, Ytrain) + print("Training time:", (datetime.now() - t0)) + + t0 = datetime.now() + print("Train accuracy:", model.score(Xtrain, Ytrain)) + print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain)) + + t0 = datetime.now() + print("Test accuracy:", model.score(Xtest, Ytest)) + print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest)) From e47d5bb32a65ad1da222b67a89f9a674f5be929a Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Thu, 21 Jun 2018 16:07:14 -0400 Subject: [PATCH 052/329] update w2v --- nlp_class2/word2vec.py | 687 +++++++++++++++++++++-------------------- 1 file changed, 351 insertions(+), 336 deletions(-) diff --git a/nlp_class2/word2vec.py b/nlp_class2/word2vec.py index ea1c7182..8d225162 100644 --- a/nlp_class2/word2vec.py +++ b/nlp_class2/word2vec.py @@ -1,8 +1,6 @@ -# Course URL: # https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python # https://udemy.com/natural-language-processing-with-deep-learning-in-python from __future__ import print_function, division -from future.utils import iteritems from builtins import range # Note: you may need to update your version of future # sudo pip install -U future @@ -10,347 +8,364 @@ import json import numpy as np -import theano -import theano.tensor as T import matplotlib.pyplot as plt +from scipy.special import expit as sigmoid from sklearn.utils import shuffle from datetime import datetime -from util import find_analogies as _find_analogies +# from util import find_analogies + +from scipy.spatial.distance import cosine as cos_dist +from sklearn.metrics.pairwise import pairwise_distances + + +from glob import glob import os import sys +import string + sys.path.append(os.path.abspath('..')) 
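+# the parent directory is added to sys.path above so that the Brown corpus
+# loader in the sibling rnn_class package (imported below) can be found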
-from rnn_class.util import get_wikipedia_data -from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx - - -def get_text8(): - words = open('../large_files/text8').read() - word2idx = {} - sents = [[]] - count = 0 - for word in words.split(): - if word not in word2idx: - word2idx[word] = count - count += 1 - sents[0].append(word2idx[word]) - print("count:", count) - return sents, word2idx - - -def sigmoid(x): - return 1 / (1 + np.exp(-x)) - - -def init_weights(shape): - return np.random.randn(*shape).astype(np.float32) / np.sqrt(sum(shape)) - - -class Model(object): - def __init__(self, D, V, context_sz): - self.D = D # embedding dimension - self.V = V # vocab size - # NOTE: we will look context_sz to the right AND context_sz to the left - # so the total number of targets is 2*context_sz - self.context_sz = context_sz - - def _get_pnw(self, X): - # calculate Pn(w) - probability distribution for negative sampling - # basically just the word probability ^ 3/4 - word_freq = {} - word_count = sum(len(x) for x in X) - for x in X: - for xj in x: - if xj not in word_freq: - word_freq[xj] = 0 - word_freq[xj] += 1 - self.Pnw = np.zeros(self.V) - for j in range(2, self.V): # 0 and 1 are the start and end tokens, we won't use those here - self.Pnw[j] = (word_freq[j] / float(word_count))**0.75 - - assert(np.all(self.Pnw[2:] > 0)) - return self.Pnw - - def _get_negative_samples(self, context, num_neg_samples): - # temporarily save context values because we don't want to negative sample these - saved = {} - for context_idx in context: - saved[context_idx] = self.Pnw[context_idx] - self.Pnw[context_idx] = 0 - neg_samples = np.random.choice( - range(self.V), - size=num_neg_samples, # this is arbitrary - number of negative samples to take - replace=False, - p=self.Pnw / np.sum(self.Pnw), - ) - for j, pnwj in iteritems(saved): - self.Pnw[j] = pnwj - assert(np.all(self.Pnw[2:] > 0)) - return neg_samples - - def fit(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epochs=10): - N = len(X) - V = self.V - D = self.D - self._get_pnw(X) - - # initialize weights and momentum changes - self.W1 = init_weights((V, D)) - self.W2 = init_weights((D, V)) - dW1 = np.zeros(self.W1.shape) - dW2 = np.zeros(self.W2.shape) - - costs = [] - cost_per_epoch = [] - sample_indices = range(N) - for i in range(epochs): - t0 = datetime.now() - sample_indices = shuffle(sample_indices) - cost_per_epoch_i = [] - for it in range(N): - j = sample_indices[it] - x = X[j] # one sentence - - # too short to do 1 iteration, skip - if len(x) < 2 * self.context_sz + 1: - continue - - cj = [] - n = len(x) - # for jj in range(n): - ########## try one random window per sentence ########### - jj = np.random.choice(n) - - # do the updates manually - Z = self.W1[x[jj],:] # note: paper uses linear activation function - - start = max(0, jj - self.context_sz) - end = min(n, jj + 1 + self.context_sz) - context = np.concatenate([x[start:jj], x[(jj+1):end]]) - # NOTE: context can contain DUPLICATES! - # e.g. 
" cats and dogs" - context = np.array(list(set(context)), dtype=np.int32) - - posA = Z.dot(self.W2[:,context]) - pos_pY = sigmoid(posA) - - neg_samples = self._get_negative_samples(context, num_neg_samples) - - # technically can remove this line now but leave for sanity checking - # neg_samples = np.setdiff1d(neg_samples, Y[j]) - negA = Z.dot(self.W2[:,neg_samples]) - neg_pY = sigmoid(-negA) - c = -np.log(pos_pY).sum() - np.log(neg_pY).sum() - cj.append(c / (num_neg_samples + len(context))) - - # positive samples - pos_err = pos_pY - 1 - dW2[:, context] = mu*dW2[:, context] - learning_rate*(np.outer(Z, pos_err) + reg*self.W2[:, context]) - - # negative samples - neg_err = 1 - neg_pY - dW2[:, neg_samples] = mu*dW2[:, neg_samples] - learning_rate*(np.outer(Z, neg_err) + reg*self.W2[:, neg_samples]) - - self.W2[:, context] += dW2[:, context] - # self.W2[:, context] /= np.linalg.norm(self.W2[:, context], axis=1, keepdims=True) - self.W2[:, neg_samples] += dW2[:, neg_samples] - # self.W2[:, neg_samples] /= np.linalg.norm(self.W2[:, neg_samples], axis=1, keepdims=True) - - # input weights - gradW1 = pos_err.dot(self.W2[:, context].T) + neg_err.dot(self.W2[:, neg_samples].T) - dW1[x[jj], :] = mu*dW1[x[jj], :] - learning_rate*(gradW1 + reg*self.W1[x[jj], :]) - - self.W1[x[jj], :] += dW1[x[jj], :] - # self.W1[x[jj], :] /= np.linalg.norm(self.W1[x[jj], :]) - - cj = np.mean(cj) - cost_per_epoch_i.append(cj) - costs.append(cj) - if it % 500 == 0: - sys.stdout.write("epoch: %d j: %d/ %d cost: %f\r" % (i, it, N, cj)) - sys.stdout.flush() - - epoch_cost = np.mean(cost_per_epoch_i) - cost_per_epoch.append(epoch_cost) - print( - "time to complete epoch %d:" % i, (datetime.now() - t0), - "cost:", epoch_cost - ) - plt.plot(costs) - plt.title("Numpy costs") - plt.show() - - plt.plot(cost_per_epoch) - plt.title("Numpy cost at each epoch") - plt.show() - - def fitt(self, X, num_neg_samples=10, learning_rate=1e-4, mu=0.99, reg=0.1, epochs=10): - N = len(X) - V = self.V - D = self.D - self._get_pnw(X) - - # initialize weights and momentum changes - W1 = init_weights((V, D)) - W2 = init_weights((D, V)) - W1 = theano.shared(W1) - W2 = theano.shared(W2) - - thInput = T.iscalar('input_word') - thContext = T.ivector('context') - thNegSamples = T.ivector('negative_samples') - - W1_subset = W1[thInput] - W2_psubset = W2[:, thContext] - W2_nsubset = W2[:, thNegSamples] - p_activation = W1_subset.dot(W2_psubset) - pos_pY = T.nnet.sigmoid(p_activation) - n_activation = W1_subset.dot(W2_nsubset) - neg_pY = T.nnet.sigmoid(-n_activation) - cost = -T.log(pos_pY).sum() - T.log(neg_pY).sum() - - W1_grad = T.grad(cost, W1_subset) - W2_pgrad = T.grad(cost, W2_psubset) - W2_ngrad = T.grad(cost, W2_nsubset) - - W1_update = T.inc_subtensor(W1_subset, -learning_rate*W1_grad) - W2_update = T.inc_subtensor( - T.inc_subtensor(W2_psubset, -learning_rate*W2_pgrad)[:,thNegSamples], -learning_rate*W2_ngrad) - # 2 updates for 1 variable - # http://stackoverflow.com/questions/15917849/how-can-i-assign-update-subset-of-tensor-shared-variable-in-theano - # http://deeplearning.net/software/theano/tutorial/faq_tutorial.html - # https://groups.google.com/forum/#!topic/theano-users/hdwaFyrNvHQ - - updates = [(W1, W1_update), (W2, W2_update)] - - train_op = theano.function( - inputs=[thInput, thContext, thNegSamples], - outputs=cost, - updates=updates, - allow_input_downcast=True, - ) - - costs = [] - cost_per_epoch = [] - sample_indices = range(N) - for i in range(epochs): - t0 = datetime.now() - sample_indices = shuffle(sample_indices) - 
cost_per_epoch_i = [] - for it in range(N): - j = sample_indices[it] - x = X[j] # one sentence - - # too short to do 1 iteration, skip - if len(x) < 2 * self.context_sz + 1: - continue - - cj = [] - n = len(x) - # for jj in range(n): - - # start = max(0, jj - self.context_sz) - # end = min(n, jj + 1 + self.context_sz) - # context = np.concatenate([x[start:jj], x[(jj+1):end]]) - # # NOTE: context can contain DUPLICATES! - # # e.g. " cats and dogs" - # context = np.array(list(set(context)), dtype=np.int32) - # neg_samples = self._get_negative_samples(context, num_neg_samples) - - # c = train_op(x[jj], context, neg_samples) - # cj.append(c / (num_neg_samples + len(context))) - - ########## try one random window per sentence ########### - jj = np.random.choice(n) - start = max(0, jj - self.context_sz) - end = min(n, jj + 1 + self.context_sz) - context = np.concatenate([x[start:jj], x[(jj+1):end]]) - # NOTE: context can contain DUPLICATES! - # e.g. " cats and dogs" - context = np.array(list(set(context)), dtype=np.int32) - neg_samples = self._get_negative_samples(context, num_neg_samples) - - c = train_op(x[jj], context, neg_samples) - cj.append(c / (num_neg_samples + len(context))) - ######################################################### - - - cj = np.mean(cj) - cost_per_epoch_i.append(cj) - costs.append(cj) - if it % 100 == 0: - sys.stdout.write("epoch: %d j: %d/ %d cost: %f\r" % (i, it, N, cj)) - sys.stdout.flush() - - epoch_cost = np.mean(cost_per_epoch_i) - cost_per_epoch.append(epoch_cost) - print( - "time to complete epoch %d:" % i, (datetime.now() - t0), - "cost:", epoch_cost - ) - - self.W1 = W1.get_value() - self.W2 = W2.get_value() - - plt.plot(costs) - plt.title("Theano costs") - plt.show() - - plt.plot(cost_per_epoch) - plt.title("Theano cost at each epoch") - plt.show() - - def save(self, fn): - arrays = [self.W1, self.W2] - np.savez(fn, *arrays) - - -def main(use_brown=True): - if use_brown: - sentences, word2idx = get_sentences_with_word2idx_limit_vocab() - # sentences, word2idx = get_sentences_with_word2idx() - # sentences, word2idx = get_text8() - else: - sentences, word2idx = get_wikipedia_data(n_files=1, n_vocab=2000) - with open('w2v_word2idx.json', 'w') as f: - json.dump(word2idx, f) - - V = len(word2idx) - model = Model(50, V, 5) - - # use numpy - # model.fit(sentences, learning_rate=1e-3, mu=0, epochs=5, num_neg_samples=5) - - # use theano - model.fitt(sentences, learning_rate=1e-3, mu=0, epochs=5, num_neg_samples=5) - - model.save('w2v_model.npz') - - -def find_analogies(w1, w2, w3, concat=True, we_file='w2v_model.npz', w2i_file='w2v_word2idx.json'): - npz = np.load(we_file) - W1 = npz['arr_0'] - W2 = npz['arr_1'] - - with open(w2i_file) as f: - word2idx = json.load(f) - - V = len(word2idx) - - if concat: - We = np.hstack([W1, W2.T]) - print("We.shape:", We.shape) - assert(V == We.shape[0]) - else: - We = (W1 + W2.T) / 2 +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab as get_brown + + + +# unfortunately these work different ways +def remove_punctuation_2(s): + return s.translate(None, string.punctuation) + +def remove_punctuation_3(s): + return s.translate(str.maketrans('','',string.punctuation)) + +if sys.version.startswith('2'): + remove_punctuation = remove_punctuation_2 +else: + remove_punctuation = remove_punctuation_3 + + + + +def get_wiki(): + V = 20000 + files = glob('../large_files/enwiki*.txt') + all_word_counts = {} + for f in files: + for line in open(f): + if line and line[0] not in '[*-|=\{\}': + s = 
remove_punctuation(line).lower().split() + if len(s) > 1: + for word in s: + if word not in all_word_counts: + all_word_counts[word] = 0 + all_word_counts[word] += 1 + print("finished counting") + + V = min(V, len(all_word_counts)) + all_word_counts = sorted(all_word_counts.items(), key=lambda x: x[1], reverse=True) + + top_words = [w for w, count in all_word_counts[:V-1]] + [''] + word2idx = {w:i for i, w in enumerate(top_words)} + unk = word2idx[''] + + sents = [] + for f in files: + for line in open(f): + if line and line[0] not in '[*-|=\{\}': + s = remove_punctuation(line).lower().split() + if len(s) > 1: + # if a word is not nearby another word, there won't be any context! + # and hence nothing to train! + sent = [word2idx[w] if w in word2idx else unk for w in s] + sents.append(sent) + return sents, word2idx + + + + +def train_model(savedir): + # get the data + sentences, word2idx = get_wiki() #get_brown() + + + # number of unique words + vocab_size = len(word2idx) + + + # config + window_size = 5 + learning_rate = 0.025 + final_learning_rate = 0.0001 + num_negatives = 5 # number of negative samples to draw per input word + samples_per_epoch = int(1e5) + epochs = 20 + D = 50 # word embedding size + + + # learning rate decay + learning_rate_delta = (learning_rate - final_learning_rate) / epochs + + + # params + W = np.random.randn(vocab_size, D) # input-to-hidden + V = np.random.randn(D, vocab_size) # hidden-to-output + + + # distribution for drawing negative samples + p_neg = get_negative_sampling_distribution(sentences) + + + # save the costs to plot them per iteration + costs = [] + + + # number of total words in corpus + total_words = sum(len(sentence) for sentence in sentences) + print("total number of words in corpus:", total_words) + + + # train the model + for epoch in range(epochs): + # randomly order sentences so we don't always see + # sentences in the same order + np.random.shuffle(sentences) + + # accumulate the cost + cost = 0 + counter = 0 + for sentence in sentences: + # randomly order words so we don't always see + # samples in the same order + # randomly_ordered_positions = [pos for pos in range(len(sentence)) \ + # if p_neg[sentence[pos]] > np.random.random()] + randomly_ordered_positions = np.random.choice( + len(sentence), + size=np.random.randint(1, len(sentence) + 1), #samples_per_epoch, + replace=False, + ) + + + # keep only certain words based on p_neg + threshold = 1e-5 + p_drop = 1 - np.sqrt(threshold / p_neg) + randomly_ordered_positions = [i for i in randomly_ordered_positions \ + if np.random.random() < (1 - p_drop[sentence[i]]) + ] + # print("Reduced sentence size from %s to %s" % (len(sentence), len(randomly_ordered_positions))) + if len(randomly_ordered_positions) == 0: + continue + + + for pos in randomly_ordered_positions: + # the middle word + word = sentence[pos] + + # get the positive context words/negative samples + context_words = get_context(pos, sentence, window_size) + # negative_samples = get_negative_samples(context_words, num_negatives, p_neg) + # print("V:", V, "p_neg.shape:", p_neg.shape) + neg_word = np.random.choice(vocab_size, p=p_neg) + + # combine them so we can loop over them all at once + # also shuffle, so we don't do all +ve then all-ve + # words_and_labels = join_samples(context_words, negative_samples) + targets = np.array(context_words) + # for other_word, label in words_and_labels: + + # do one iteration of stochastic gradient descent + c = sgd(word, targets, 1, learning_rate, W, V) + cost += c + c = sgd(neg_word, targets, 
0, learning_rate, W, V) + cost += c + + counter += 1 + if counter % 100 == 0: + sys.stdout.write("processed %s / %s\r" % (counter, len(sentences))) + sys.stdout.flush() + # break + + + # print stuff so we don't stare at a blank screen + print("epoch complete:", epoch) + + # save the cost + costs.append(cost) + + # update the learning rate + learning_rate -= learning_rate_delta + + + # plot the cost per iteration + plt.plot(costs) + plt.show() + + + # save the model + if not os.path.exists(savedir): + os.mkdir(savedir) + + with open('%s/word2idx.json' % savedir, 'w') as f: + json.dump(word2idx, f) + + np.savez('%s/weights.npz' % savedir, W, V) + + # return the model + return word2idx, W, V + + +def get_negative_sampling_distribution(sentences): + # Pn(w) = prob of word occuring + # we would like to sample the negative samples + # such that words that occur more often + # should be sampled more often + + word_freq = {} + word_count = sum(len(sentence) for sentence in sentences) + for sentence in sentences: + for word in sentence: + if word not in word_freq: + word_freq[word] = 0 + word_freq[word] += 1 + + # vocab size + V = len(word_freq) + + p_neg = np.zeros(V) + for j in range(V): + p_neg[j] = word_freq[j]**0.75 + + # normalize it + p_neg = p_neg / p_neg.sum() + + assert(np.all(p_neg > 0)) + return p_neg + + +def get_context(pos, sentence, window_size): + # input: + # a sentence of the form: x x x x c c c pos c c c x x x x + # output: + # the context word indices: c c c c c c + + start = max(0, pos - window_size) + end_ = min(len(sentence), pos + window_size) + + context = [] + for ctx_pos, ctx_word_idx in enumerate(sentence[start:end_], start=start): + if ctx_pos != pos: + # don't include the input word itself as a target + context.append(ctx_word_idx) + return context + + +def sgd(input_, targets, label, learning_rate, W, V): + # W[input_] shape: D + # V[:,targets] shape: D x N + # activation shape: N + # print("input_:", input_, "targets:", targets) + activation = W[input_].dot(V[:,targets]) + prob = sigmoid(activation) + + # gradients + gV = np.outer(W[input_], prob - label) # D x N + gW = np.sum((prob - label)*V[:,targets], axis=1) # D + + V[:,targets] -= learning_rate*gV # D x N + W[input_] -= learning_rate*gW # D + + # return cost (binary cross entropy) + cost = label * np.log(prob + 1e-10) + (1 - label) * np.log(1 - prob + 1e-10) + return cost.sum() + + +def load_model(savedir): + with open('%s/word2idx.json' % savedir) as f: + word2idx = json.load(f) + npz = np.load('%s/weights.npz' % savedir) + W = npz['arr_0'] + V = npz['arr_1'] + return word2idx, W, V + + + +def analogy(pos1, neg1, pos2, neg2, word2idx, idx2word, W): + V, D = W.shape + + # don't actually use pos2 in calculation, just print what's expected + print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2)) + for w in (pos1, neg1, pos2, neg2): + if w not in word2idx: + print("Sorry, %s not in word2idx" % w) + return + + p1 = W[word2idx[pos1]] + n1 = W[word2idx[neg1]] + p2 = W[word2idx[pos2]] + n2 = W[word2idx[neg2]] + + vec = p1 - n1 + n2 + + distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V) + idx = distances.argsort()[:10] + + # pick one that's not p1, n1, or n2 + best_idx = -1 + keep_out = [word2idx[w] for w in (pos1, neg1, neg2)] + # print("keep_out:", keep_out) + for i in idx: + if i not in keep_out: + best_idx = i + break + # print("best_idx:", best_idx) + + print("got: %s - %s = %s - %s" % (pos1, neg1, idx2word[best_idx], neg2)) + print("closest 10:") + for i in idx: + 
print(idx2word[i], distances[i]) + + print("dist to %s:" % pos2, cos_dist(p2, vec)) + + +def test_model(word2idx, W, V): + # there are multiple ways to get the "final" word embedding + # We = (W + V.T) / 2 + # We = W + + idx2word = {i:w for w, i in word2idx.items()} + + for We in (W, (W + V.T) / 2): + print("**********") + + analogy('king', 'man', 'queen', 'woman', word2idx, idx2word, We) + analogy('king', 'prince', 'queen', 'princess', word2idx, idx2word, We) + analogy('miami', 'florida', 'dallas', 'texas', word2idx, idx2word, We) + analogy('einstein', 'scientist', 'picasso', 'painter', word2idx, idx2word, We) + analogy('japan', 'sushi', 'germany', 'bratwurst', word2idx, idx2word, We) + analogy('man', 'woman', 'he', 'she', word2idx, idx2word, We) + analogy('man', 'woman', 'uncle', 'aunt', word2idx, idx2word, We) + analogy('man', 'woman', 'brother', 'sister', word2idx, idx2word, We) + analogy('man', 'woman', 'husband', 'wife', word2idx, idx2word, We) + analogy('man', 'woman', 'actor', 'actress', word2idx, idx2word, We) + analogy('man', 'woman', 'father', 'mother', word2idx, idx2word, We) + analogy('heir', 'heiress', 'prince', 'princess', word2idx, idx2word, We) + analogy('nephew', 'niece', 'uncle', 'aunt', word2idx, idx2word, We) + analogy('france', 'paris', 'japan', 'tokyo', word2idx, idx2word, We) + analogy('france', 'paris', 'china', 'beijing', word2idx, idx2word, We) + analogy('february', 'january', 'december', 'november', word2idx, idx2word, We) + analogy('france', 'paris', 'germany', 'berlin', word2idx, idx2word, We) + analogy('week', 'day', 'year', 'month', word2idx, idx2word, We) + analogy('week', 'day', 'hour', 'minute', word2idx, idx2word, We) + analogy('france', 'paris', 'italy', 'rome', word2idx, idx2word, We) + analogy('paris', 'france', 'rome', 'italy', word2idx, idx2word, We) + analogy('france', 'french', 'england', 'english', word2idx, idx2word, We) + analogy('japan', 'japanese', 'china', 'chinese', word2idx, idx2word, We) + analogy('china', 'chinese', 'america', 'american', word2idx, idx2word, We) + analogy('japan', 'japanese', 'italy', 'italian', word2idx, idx2word, We) + analogy('japan', 'japanese', 'australia', 'australian', word2idx, idx2word, We) + analogy('walk', 'walking', 'swim', 'swimming', word2idx, idx2word, We) + - _find_analogies(w1, w2, w3, We, word2idx) if __name__ == '__main__': - main(use_brown=False) - for concat in (True, False): - print("** concat:", concat) - find_analogies('king', 'man', 'woman', concat=concat) - find_analogies('france', 'paris', 'london', concat=concat) - find_analogies('france', 'paris', 'rome', concat=concat) - find_analogies('paris', 'france', 'italy', concat=concat) + word2idx, W, V = train_model('w2v_model') + # word2idx, W, V = load_model('w2v_model') + test_model(word2idx, W, V) + From 1eaccf9729cf1b265a87e58302ddb1dc4e023176 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Thu, 21 Jun 2018 23:43:54 -0400 Subject: [PATCH 053/329] update --- nlp_class2/word2vec.py | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/nlp_class2/word2vec.py b/nlp_class2/word2vec.py index 8d225162..ff387ca9 100644 --- a/nlp_class2/word2vec.py +++ b/nlp_class2/word2vec.py @@ -95,8 +95,7 @@ def train_model(savedir): learning_rate = 0.025 final_learning_rate = 0.0001 num_negatives = 5 # number of negative samples to draw per input word - samples_per_epoch = int(1e5) - epochs = 20 + epochs = 5 D = 50 # word embedding size @@ -110,7 +109,7 @@ def train_model(savedir): # distribution for drawing negative samples 
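The change just below passes vocab_size into get_negative_sampling_distribution, which this patch also rewrites as a plain array computation. As a standalone toy sketch of what that distribution does (illustrative code, not the repo's function), unigram counts are raised to the 0.75 power and renormalized, which dampens very frequent words a little:

import numpy as np

def unigram_neg_dist(indexed_sentences, vocab_size, power=0.75):
    # count how often each word index appears in the corpus
    counts = np.zeros(vocab_size)
    for sentence in indexed_sentences:
        for w in sentence:
            counts[w] += 1
    # raise to the 3/4 power ("smoothing") and normalize
    p = counts ** power
    return p / p.sum()

# toy corpus over word indices 0..2; word 0 is by far the most frequent
print(unigram_neg_dist([[0, 0, 0, 0, 1], [0, 0, 2]], vocab_size=3))
# word 0 keeps the highest probability, but less than its raw 6/8 share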
- p_neg = get_negative_sampling_distribution(sentences) + p_neg = get_negative_sampling_distribution(sentences, vocab_size) # save the costs to plot them per iteration @@ -138,7 +137,7 @@ def train_model(savedir): # if p_neg[sentence[pos]] > np.random.random()] randomly_ordered_positions = np.random.choice( len(sentence), - size=np.random.randint(1, len(sentence) + 1), #samples_per_epoch, + size=np.random.randint(1, len(sentence) + 1), replace=False, ) @@ -184,7 +183,7 @@ def train_model(savedir): # print stuff so we don't stare at a blank screen - print("epoch complete:", epoch) + print("epoch complete:", epoch, "cost:", cost) # save the cost costs.append(cost) @@ -211,26 +210,20 @@ def train_model(savedir): return word2idx, W, V -def get_negative_sampling_distribution(sentences): +def get_negative_sampling_distribution(sentences, vocab_size): # Pn(w) = prob of word occuring # we would like to sample the negative samples # such that words that occur more often # should be sampled more often - word_freq = {} + word_freq = np.zeros(vocab_size) word_count = sum(len(sentence) for sentence in sentences) for sentence in sentences: for word in sentence: - if word not in word_freq: - word_freq[word] = 0 word_freq[word] += 1 - - # vocab size - V = len(word_freq) - p_neg = np.zeros(V) - for j in range(V): - p_neg[j] = word_freq[j]**0.75 + # smooth it + p_neg = word_freq**0.75 # normalize it p_neg = p_neg / p_neg.sum() From 14a26c06a4427b3efe5ffd1d26284c1bc744d8aa Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Sun, 24 Jun 2018 14:12:03 -0400 Subject: [PATCH 054/329] update --- nlp_class2/word2vec.py | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/nlp_class2/word2vec.py b/nlp_class2/word2vec.py index ff387ca9..ba92e68c 100644 --- a/nlp_class2/word2vec.py +++ b/nlp_class2/word2vec.py @@ -95,7 +95,7 @@ def train_model(savedir): learning_rate = 0.025 final_learning_rate = 0.0001 num_negatives = 5 # number of negative samples to draw per input word - epochs = 5 + epochs = 20 D = 50 # word embedding size @@ -120,6 +120,10 @@ def train_model(savedir): total_words = sum(len(sentence) for sentence in sentences) print("total number of words in corpus:", total_words) + # for subsampling each sentence + threshold = 1e-5 + p_drop = 1 - np.sqrt(threshold / p_neg) + # train the model for epoch in range(epochs): @@ -130,28 +134,24 @@ def train_model(savedir): # accumulate the cost cost = 0 counter = 0 + t0 = datetime.now() for sentence in sentences: + # keep only certain words based on p_neg + sentence = [w for w in sentence \ + if np.random.random() < (1 - p_drop[w]) + ] + if len(sentence) < 2: + continue + + # randomly order words so we don't always see # samples in the same order - # randomly_ordered_positions = [pos for pos in range(len(sentence)) \ - # if p_neg[sentence[pos]] > np.random.random()] randomly_ordered_positions = np.random.choice( len(sentence), - size=np.random.randint(1, len(sentence) + 1), + size=len(sentence),#np.random.randint(1, len(sentence) + 1), replace=False, ) - - # keep only certain words based on p_neg - threshold = 1e-5 - p_drop = 1 - np.sqrt(threshold / p_neg) - randomly_ordered_positions = [i for i in randomly_ordered_positions \ - if np.random.random() < (1 - p_drop[sentence[i]]) - ] - # print("Reduced sentence size from %s to %s" % (len(sentence), len(randomly_ordered_positions))) - if len(randomly_ordered_positions) == 0: - continue - for pos in randomly_ordered_positions: # the middle word @@ -159,15 +159,8 @@ 
def train_model(savedir): # get the positive context words/negative samples context_words = get_context(pos, sentence, window_size) - # negative_samples = get_negative_samples(context_words, num_negatives, p_neg) - # print("V:", V, "p_neg.shape:", p_neg.shape) neg_word = np.random.choice(vocab_size, p=p_neg) - - # combine them so we can loop over them all at once - # also shuffle, so we don't do all +ve then all-ve - # words_and_labels = join_samples(context_words, negative_samples) targets = np.array(context_words) - # for other_word, label in words_and_labels: # do one iteration of stochastic gradient descent c = sgd(word, targets, 1, learning_rate, W, V) @@ -183,7 +176,8 @@ def train_model(savedir): # print stuff so we don't stare at a blank screen - print("epoch complete:", epoch, "cost:", cost) + dt = datetime.now() - t0 + print("epoch complete:", epoch, "cost:", cost, "dt:", dt) # save the cost costs.append(cost) From 3e9067cf040d48fb1b9111a08376a0e48b7279e5 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 25 Jun 2018 01:11:19 -0400 Subject: [PATCH 055/329] update --- nlp_class2/extra_reading.txt | 11 +++++- nlp_class2/glove.py | 66 ++++++++++++++++++++++++------------ nlp_class2/glove_tf.py | 47 ++++++++++++++++++------- nlp_class2/glove_theano.py | 42 +++++++++++++++++------ nlp_class2/util.py | 13 +++++-- 5 files changed, 133 insertions(+), 46 deletions(-) diff --git a/nlp_class2/extra_reading.txt b/nlp_class2/extra_reading.txt index 5e992840..bbb408ad 100644 --- a/nlp_class2/extra_reading.txt +++ b/nlp_class2/extra_reading.txt @@ -1,2 +1,11 @@ Neural Word Embedding as Implicit Matrix Factorization -http://papers.nips.cc/paper/5477-neural-word-embedding-as-implicit-matrix-factorization.pdf \ No newline at end of file +http://papers.nips.cc/paper/5477-neural-word-embedding-as-implicit-matrix-factorization.pdf + +Hierarchical Softmax +http://www.iro.umontreal.ca/~lisa/pointeurs/hierarchical-nnlm-aistats05.pdf + +More about Hierarchical Softmax +http://papers.nips.cc/paper/3583-a-scalable-hierarchical-distributed-language-model.pdf + +Distributed Representations of Words and Phrases and their Compositionality +https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf \ No newline at end of file diff --git a/nlp_class2/glove.py b/nlp_class2/glove.py index 10f370d8..d6f344ed 100644 --- a/nlp_class2/glove.py +++ b/nlp_class2/glove.py @@ -14,7 +14,13 @@ from datetime import datetime from sklearn.utils import shuffle -from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab +from util import find_analogies + + +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx # using ALS, what's the least # files to get correct analogies? 
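Since the hunks below switch glove.py from the gradient-descent fit back to alternating least squares, here is a rough sketch of what one ALS step looks like for a single row of W when U, the biases, and the global mean are held fixed. Variable names, the dense matrices, and the exact regularization are assumptions for illustration, not the class's actual method:

import numpy as np

def als_update_W_row(i, fX, logX, U, b, c, mu, reg):
    # weighted ridge-regression solution for row i of W,
    # holding U, the biases b, c and the global mean mu fixed
    fi = fX[i]                                   # f(X_ij) weights for row i
    A = (U * fi[:, None]).T.dot(U) + reg * np.eye(U.shape[1])
    r = logX[i] - b[i] - c - mu                  # residual targets for row i
    return np.linalg.solve(A, U.T.dot(fi * r))

# shape check with random placeholders (V words, D latent dims)
V, D = 10, 4
fX, logX = np.random.rand(V, V), np.random.randn(V, V)
U, b, c = np.random.randn(V, D), np.random.randn(V), np.random.randn(V)
print(als_update_W_row(0, fX, logX, U, b, c, mu=logX.mean(), reg=0.1).shape)  # (4,)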
# use this for word2vec training to make it faster @@ -230,7 +236,7 @@ def save(self, fn): np.savez(fn, *arrays) -def main(we_file, w2i_file, use_brown=True, n_files=50): +def main(we_file, w2i_file, use_brown=True, n_files=100): if use_brown: cc_matrix = "cc_matrix_brown.npy" else: @@ -260,20 +266,20 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): json.dump(word2idx, f) V = len(word2idx) - model = Glove(100, V, 10) + model = Glove(200, V, 10) # alternating least squares method - # model.fit(sentences, cc_matrix=cc_matrix, epochs=20) + model.fit(sentences, cc_matrix=cc_matrix, epochs=20) # gradient descent method - model.fit( - sentences, - cc_matrix=cc_matrix, - learning_rate=5e-4, - reg=0.1, - epochs=500, - gd=True, - ) + # model.fit( + # sentences, + # cc_matrix=cc_matrix, + # learning_rate=5e-4, + # reg=0.1, + # epochs=500, + # gd=True, + # ) model.save(we_file) @@ -283,14 +289,32 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): # we = 'glove_model_brown.npz' # w2i = 'glove_word2idx_brown.json' main(we, w2i, use_brown=False) + + # load back embeddings + npz = np.load(we) + W1 = npz['arr_0'] + W2 = npz['arr_1'] + + with open(w2i) as f: + word2idx = json.load(f) + idx2word = {i:w for w,i in word2idx.items()} + for concat in (True, False): print("** concat:", concat) - find_analogies('king', 'man', 'woman', concat, we, w2i) - find_analogies('france', 'paris', 'london', concat, we, w2i) - find_analogies('france', 'paris', 'rome', concat, we, w2i) - find_analogies('paris', 'france', 'italy', concat, we, w2i) - find_analogies('france', 'french', 'english', concat, we, w2i) - find_analogies('japan', 'japanese', 'chinese', concat, we, w2i) - find_analogies('japan', 'japanese', 'italian', concat, we, w2i) - find_analogies('japan', 'japanese', 'australian', concat, we, w2i) - find_analogies('december', 'november', 'june', concat, we, w2i) + + if concat: + We = np.hstack([W1, W2.T]) + else: + We = (W1 + W2.T) / 2 + + + find_analogies('king', 'man', 'woman', We, word2idx, idx2word) + find_analogies('france', 'paris', 'london', We, word2idx, idx2word) + find_analogies('france', 'paris', 'rome', We, word2idx, idx2word) + find_analogies('paris', 'france', 'italy', We, word2idx, idx2word) + find_analogies('france', 'french', 'english', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word) + find_analogies('december', 'november', 'june', We, word2idx, idx2word) + diff --git a/nlp_class2/glove_tf.py b/nlp_class2/glove_tf.py index c86afff1..aa8371ad 100644 --- a/nlp_class2/glove_tf.py +++ b/nlp_class2/glove_tf.py @@ -15,7 +15,13 @@ from datetime import datetime from sklearn.utils import shuffle -from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab +from util import find_analogies + +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx + class Glove: @@ -24,7 +30,7 @@ def __init__(self, D, V, context_sz): self.V = V self.context_sz = context_sz - def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False, use_theano=False, use_tensorflow=False): + def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10): # build 
co-occurrence matrix # paper calls it X, so we will call it X, instead of calling # the training data X @@ -119,7 +125,7 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, delta = tf.matmul(tfW, tf.transpose(tfU)) + tfb + tfc + mu - tfLogX cost = tf.reduce_sum(tffX * delta * delta) regularized_cost = cost - for param in (tfW, tfb, tfU, tfc): + for param in (tfW, tfU): regularized_cost += reg*tf.reduce_sum(param * param) train_op = tf.train.MomentumOptimizer( @@ -189,14 +195,31 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): we = 'glove_model_50.npz' w2i = 'glove_word2idx_50.json' main(we, w2i, use_brown=False) + + # load back embeddings + npz = np.load(we) + W1 = npz['arr_0'] + W2 = npz['arr_1'] + + with open(w2i) as f: + word2idx = json.load(f) + idx2word = {i:w for w,i in word2idx.items()} + for concat in (True, False): print("** concat:", concat) - find_analogies('king', 'man', 'woman', concat, we, w2i) - find_analogies('france', 'paris', 'london', concat, we, w2i) - find_analogies('france', 'paris', 'rome', concat, we, w2i) - find_analogies('paris', 'france', 'italy', concat, we, w2i) - find_analogies('france', 'french', 'english', concat, we, w2i) - find_analogies('japan', 'japanese', 'chinese', concat, we, w2i) - find_analogies('japan', 'japanese', 'italian', concat, we, w2i) - find_analogies('japan', 'japanese', 'australian', concat, we, w2i) - find_analogies('december', 'november', 'june', concat, we, w2i) + + if concat: + We = np.hstack([W1, W2.T]) + else: + We = (W1 + W2.T) / 2 + + + find_analogies('king', 'man', 'woman', We, word2idx, idx2word) + find_analogies('france', 'paris', 'london', We, word2idx, idx2word) + find_analogies('france', 'paris', 'rome', We, word2idx, idx2word) + find_analogies('paris', 'france', 'italy', We, word2idx, idx2word) + find_analogies('france', 'french', 'english', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word) + find_analogies('december', 'november', 'june', We, word2idx, idx2word) diff --git a/nlp_class2/glove_theano.py b/nlp_class2/glove_theano.py index 33705ca8..c979e76f 100644 --- a/nlp_class2/glove_theano.py +++ b/nlp_class2/glove_theano.py @@ -16,7 +16,12 @@ from datetime import datetime from sklearn.utils import shuffle -from word2vec import get_wikipedia_data, find_analogies, get_sentences_with_word2idx_limit_vocab +from util import find_analogies + +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx def momentum_updates(cost, params, lr=1e-4, mu=0.9): @@ -220,14 +225,31 @@ def main(we_file, w2i_file, use_brown=True, n_files=50): # we = 'glove_model_brown.npz' # w2i = 'glove_word2idx_brown.json' main(we, w2i, use_brown=False) + + # load back embeddings + npz = np.load(we) + W1 = npz['arr_0'] + W2 = npz['arr_1'] + + with open(w2i) as f: + word2idx = json.load(f) + idx2word = {i:w for w,i in word2idx.items()} + for concat in (True, False): print("** concat:", concat) - find_analogies('king', 'man', 'woman', concat, we, w2i) - find_analogies('france', 'paris', 'london', concat, we, w2i) - find_analogies('france', 'paris', 'rome', concat, we, w2i) - find_analogies('paris', 'france', 'italy', concat, we, w2i) - find_analogies('france', 'french', 'english', concat, 
we, w2i) - find_analogies('japan', 'japanese', 'chinese', concat, we, w2i) - find_analogies('japan', 'japanese', 'italian', concat, we, w2i) - find_analogies('japan', 'japanese', 'australian', concat, we, w2i) - find_analogies('december', 'november', 'june', concat, we, w2i) + + if concat: + We = np.hstack([W1, W2.T]) + else: + We = (W1 + W2.T) / 2 + + + find_analogies('king', 'man', 'woman', We, word2idx, idx2word) + find_analogies('france', 'paris', 'london', We, word2idx, idx2word) + find_analogies('france', 'paris', 'rome', We, word2idx, idx2word) + find_analogies('paris', 'france', 'italy', We, word2idx, idx2word) + find_analogies('france', 'french', 'english', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word) + find_analogies('december', 'november', 'june', We, word2idx, idx2word) diff --git a/nlp_class2/util.py b/nlp_class2/util.py index 5de1b4d1..f2a79888 100644 --- a/nlp_class2/util.py +++ b/nlp_class2/util.py @@ -54,8 +54,17 @@ def find_analogies(w1, w2, w3, We, word2idx, idx2word): for dist in ('euclidean', 'cosine'): distances = pairwise_distances(v0.reshape(1, D), We, metric=dist).reshape(V) - idx = distances.argmin() - best_word = idx2word[idx] + # idx = distances.argmin() + # best_word = idx2word[idx] + idx = distances.argsort()[:4] + best_idx = -1 + keep_out = [word2idx[w] for w in (w1, w2, w3)] + for i in idx: + if i not in keep_out: + best_idx = i + break + best_word = idx2word[best_idx] + print("closest match by", dist, "distance:", best_word) print(w1, "-", w2, "=", best_word, "-", w3) From df5dbc04988797d2d11768a8f8c4a6c8b5a508a1 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 25 Jun 2018 01:12:22 -0400 Subject: [PATCH 056/329] update --- nlp_class2/pretrained_glove.py | 9 +++++++-- nlp_class2/rntn_tensorflow_rnn.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/nlp_class2/pretrained_glove.py b/nlp_class2/pretrained_glove.py index ebae2f8a..241b0c03 100644 --- a/nlp_class2/pretrained_glove.py +++ b/nlp_class2/pretrained_glove.py @@ -32,6 +32,7 @@ def dist2(a, b): # for w in (w1, w2, w3): # if w not in word2vec: # print("%s not in dictionary" % w) +# return # king = word2vec[w1] # man = word2vec[w2] @@ -62,8 +63,12 @@ def find_analogies(w1, w2, w3): v0 = king - man + woman distances = pairwise_distances(v0.reshape(1, D), embedding, metric=metric).reshape(V) - idx = distances.argmin() - best_word = idx2word[idx] + idxs = distances.argsort()[:4] + for idx in idxs: + word = idx2word[idx] + if word not in (w1, w2, w3): + best_word = word + break print(w1, "-", w2, "=", best_word, "-", w3) diff --git a/nlp_class2/rntn_tensorflow_rnn.py b/nlp_class2/rntn_tensorflow_rnn.py index 8cd830c2..29caba32 100644 --- a/nlp_class2/rntn_tensorflow_rnn.py +++ b/nlp_class2/rntn_tensorflow_rnn.py @@ -148,7 +148,7 @@ def condition(hiddens, n): # NOTE: If you're using GPU, InteractiveSession breaks # AdagradOptimizer and some other optimizers # change to tf.Session() if so. 
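For readers unfamiliar with the difference behind the swap below: tf.InteractiveSession registers itself as the default session so tensors can be evaluated bare, while a plain tf.Session makes every run explicit. A minimal TensorFlow 1.x illustration (generic, not this repo's graph):

import tensorflow as tf   # assumes TensorFlow 1.x

x = tf.constant(3.0)
y = x * 2.0

# InteractiveSession installs itself as the default session,
# so y.eval() would work with no session argument:
#   sess = tf.InteractiveSession(); print(y.eval())

# a plain Session is explicit about what runs where:
sess = tf.Session()
print(sess.run(y))   # 6.0
sess.close()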
- self.session = tf.InteractiveSession() + self.session = tf.Session() init_op = tf.global_variables_initializer() self.session.run(init_op) From d0692c9a4a2f9ad1130144ce5046235d5a8231ed Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Mon, 25 Jun 2018 14:10:04 -0400 Subject: [PATCH 057/329] update --- nlp_class2/word2vec_tf.py | 78 +++++++++++---------------------------- 1 file changed, 21 insertions(+), 57 deletions(-) diff --git a/nlp_class2/word2vec_tf.py b/nlp_class2/word2vec_tf.py index a6aa70e0..fee4ad99 100644 --- a/nlp_class2/word2vec_tf.py +++ b/nlp_class2/word2vec_tf.py @@ -115,7 +115,7 @@ def train_model(savedir): final_learning_rate = 0.0001 num_negatives = 5 # number of negative samples to draw per input word samples_per_epoch = int(1e5) - epochs = 25 + epochs = 20 D = 50 # word embedding size # learning rate decay @@ -197,6 +197,11 @@ def dot(A, B): print("total number of words in corpus:", total_words) + # for subsampling each sentence + threshold = 1e-5 + p_drop = 1 - np.sqrt(threshold / p_neg) + + # train the model for epoch in range(epochs): # randomly order sentences so we don't always see @@ -209,33 +214,27 @@ def dot(A, B): inputs = [] targets = [] negwords = [] + t0 = datetime.now() for sentence in sentences: + + # keep only certain words based on p_neg + sentence = [w for w in sentence \ + if np.random.random() < (1 - p_drop[w]) + ] + if len(sentence) < 2: + continue + + # randomly order words so we don't always see # samples in the same order - # randomly_ordered_positions = [pos for pos in range(len(sentence)) \ - # if p_neg[sentence[pos]] > np.random.random()] randomly_ordered_positions = np.random.choice( len(sentence), - size=np.random.randint(1, len(sentence) + 1), #samples_per_epoch, + # size=np.random.randint(1, len(sentence) + 1), + size=len(sentence), replace=False, ) - # keep only certain words based on p_neg - threshold = 1e-5 - p_drop = 1 - np.sqrt(threshold / p_neg) - randomly_ordered_positions = [i for i in randomly_ordered_positions \ - if np.random.random() < (1 - p_drop[sentence[i]]) - ] - # print("Reduced sentence size from %s to %s" % (len(sentence), len(randomly_ordered_positions))) - if len(randomly_ordered_positions) == 0: - continue - - # init - # TODO: don't need to randomly order positions - # since we'll do the whole sentence at once - # move call to train op outside the loop - for j, pos in enumerate(randomly_ordered_positions): # the middle word word = sentence[pos] @@ -244,10 +243,6 @@ def dot(A, B): context_words = get_context(pos, sentence, window_size) neg_word = np.random.choice(vocab_size, p=p_neg) - # combine them so we can loop over them all at once - # also shuffle, so we don't do all +ve then all-ve - # words_and_labels = join_samples(context_words, negative_samples) - # targets_ = np.array(context_words) n = len(context_words) inputs += [word]*n @@ -290,7 +285,8 @@ def dot(A, B): # print stuff so we don't stare at a blank screen - print("epoch complete:", epoch) + dt = datetime.now() - t0 + print("epoch complete:", epoch, "cost:", cost, "dt:", dt) # save the cost costs.append(cost) @@ -339,7 +335,7 @@ def get_negative_sampling_distribution(sentences): p_neg = np.zeros(V) for j in range(V): - p_neg[j] = (word_freq[j] / float(V))**0.75 + p_neg[j] = word_freq[j]**0.75 # normalize it p_neg = p_neg / p_neg.sum() @@ -365,38 +361,6 @@ def get_context(pos, sentence, window_size): return context -def get_negative_samples(context, num_negatives, p_neg): - # randomly select some words not in the context - - # first copy p_neg so we 
can modify it by - # setting the sentence's word's probabilities to 0 - p_neg = p_neg.copy() - - for word in context: - p_neg[word] = 0 - - # re-normalize it so it remains a valid distribution - p_neg = p_neg / p_neg.sum() - - # draw the samples - neg_samples = np.random.choice( - len(p_neg), # vocab size - size=num_negatives, - replace=False, - p=p_neg, - ) - return neg_samples - - -def join_samples(context_words, negative_samples): - # we want to return a list of tuples of: - # word -> label - words_and_labels = [(w, 1) for w in context_words] + \ - [(w, 0) for w in negative_samples] - np.random.shuffle(words_and_labels) - return words_and_labels - - def load_model(savedir): with open('%s/word2idx.json' % savedir) as f: From a019afc41d1702ac29d8b7ef2bc2ae4b600a2ab3 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 28 Jun 2018 21:28:12 -0400 Subject: [PATCH 058/329] show misclassified --- nlp_class/sentiment.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/nlp_class/sentiment.py b/nlp_class/sentiment.py index e7e2ed15..ad592094 100644 --- a/nlp_class/sentiment.py +++ b/nlp_class/sentiment.py @@ -16,6 +16,7 @@ import nltk import numpy as np +from sklearn.utils import shuffle from nltk.stem import WordNetLemmatizer from sklearn.linear_model import LogisticRegression @@ -76,8 +77,10 @@ def my_tokenizer(s): current_index = 0 positive_tokenized = [] negative_tokenized = [] +orig_reviews = [] for review in positive_reviews: + orig_reviews.append(review.text) tokens = my_tokenizer(review.text) positive_tokenized.append(tokens) for token in tokens: @@ -86,6 +89,7 @@ def my_tokenizer(s): current_index += 1 for review in negative_reviews: + orig_reviews.append(review.text) tokens = my_tokenizer(review.text) negative_tokenized.append(tokens) for token in tokens: @@ -93,6 +97,7 @@ def my_tokenizer(s): word_index_map[token] = current_index current_index += 1 +print("len(word_index_map):", len(word_index_map)) # now let's create our input matrices def tokens_to_vector(tokens, label): @@ -120,7 +125,7 @@ def tokens_to_vector(tokens, label): # shuffle the data and create train/test splits # try it multiple times! 
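The replacement below uses sklearn.utils.shuffle so that the raw review texts and the feature matrix are permuted together and stay aligned. A tiny standalone illustration of that behaviour (toy data, not the course's reviews):

import numpy as np
from sklearn.utils import shuffle

texts = ['good', 'bad', 'great', 'awful']
X = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])

# both arrays receive the same permutation, so X[i] still
# corresponds to texts[i] after the shuffle
texts, X = shuffle(texts, X, random_state=0)
print(texts)
print(X)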
-np.random.shuffle(data) +orig_reviews, data = shuffle(orig_reviews, data) X = data[:,:-1] Y = data[:,-1] @@ -133,7 +138,8 @@ def tokens_to_vector(tokens, label): model = LogisticRegression() model.fit(Xtrain, Ytrain) -print("Classification rate:", model.score(Xtest, Ytest)) +print("Train accuracy:", model.score(Xtrain, Ytrain)) +print("Test accuracy:", model.score(Xtest, Ytest)) # let's look at the weights for each word @@ -143,3 +149,30 @@ def tokens_to_vector(tokens, label): weight = model.coef_[0][index] if weight > threshold or weight < -threshold: print(word, weight) + + +# check misclassified examples +P = model.predict_proba(X)[:,1] # p(y = 1 | x) + +# since there are many, just print the "most" wrong samples +minP_whenYis1 = 1 +maxP_whenYis0 = 0 +wrong_positive_review = None +wrong_negative_review = None +for i in range(N): + p = P[i] + y = Y[i] + if y == 1 and p < 0.5: + if p < minP_whenYis1: + wrong_positive_review = orig_reviews[i] + minP_whenYis1 = p + elif y == 0 and p > 0.5: + if p > maxP_whenYis0: + wrong_negative_review = orig_reviews[i] + maxP_whenYis0 = p + +print("Most wrong positive review (prob = %s):" % minP_whenYis1) +print(wrong_positive_review) +print("Most wrong negative review (prob = %s):" % maxP_whenYis0) +print(wrong_negative_review) + From 8672ba286172b521c4d893cac98256044a39e0ca Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 28 Jun 2018 21:34:09 -0400 Subject: [PATCH 059/329] update --- nlp_class/sentiment.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/nlp_class/sentiment.py b/nlp_class/sentiment.py index ad592094..c852ac2f 100644 --- a/nlp_class/sentiment.py +++ b/nlp_class/sentiment.py @@ -152,6 +152,7 @@ def tokens_to_vector(tokens, label): # check misclassified examples +preds = model.predict(X) P = model.predict_proba(X)[:,1] # p(y = 1 | x) # since there are many, just print the "most" wrong samples @@ -159,20 +160,24 @@ def tokens_to_vector(tokens, label): maxP_whenYis0 = 0 wrong_positive_review = None wrong_negative_review = None +wrong_positive_prediction = None +wrong_negative_prediction = None for i in range(N): p = P[i] y = Y[i] if y == 1 and p < 0.5: if p < minP_whenYis1: wrong_positive_review = orig_reviews[i] + wrong_positive_prediction = preds[i] minP_whenYis1 = p elif y == 0 and p > 0.5: if p > maxP_whenYis0: wrong_negative_review = orig_reviews[i] + wrong_negative_prediction = preds[i] maxP_whenYis0 = p -print("Most wrong positive review (prob = %s):" % minP_whenYis1) +print("Most wrong positive review (prob = %s, pred = %s):" % (minP_whenYis1, wrong_positive_prediction)) print(wrong_positive_review) -print("Most wrong negative review (prob = %s):" % maxP_whenYis0) +print("Most wrong negative review (prob = %s, pred = %s):" % (maxP_whenYis0, wrong_negative_prediction)) print(wrong_negative_review) From 8e4e8f01f7dac199ddc4301791884c2c44205271 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 29 Jun 2018 02:41:57 -0400 Subject: [PATCH 060/329] update --- nlp_class2/pmi.py | 8 +++++--- nlp_class2/pretrained_glove.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nlp_class2/pmi.py b/nlp_class2/pmi.py index c777c21f..28690a12 100644 --- a/nlp_class2/pmi.py +++ b/nlp_class2/pmi.py @@ -133,7 +133,9 @@ def remove_punctuation_3(s): start = max(0, i - context_size) end = min(len(line_as_idx), i + context_size) - for c in line_as_idx[start:end]: + for c in line_as_idx[start:i]: + wc_counts[w, c] += 1 + for c in line_as_idx[i+1:end]: wc_counts[w, c] += 1 print("Finished counting") @@ -161,8 +163,8 @@ def 
remove_punctuation_3(s): # latent dimension -D = 50 -reg = 0. +D = 100 +reg = 0.1 # initialize weights diff --git a/nlp_class2/pretrained_glove.py b/nlp_class2/pretrained_glove.py index 241b0c03..b5e60463 100644 --- a/nlp_class2/pretrained_glove.py +++ b/nlp_class2/pretrained_glove.py @@ -92,7 +92,7 @@ def nearest_neighbors(w, n=5): word2vec = {} embedding = [] idx2word = [] -with open('../large_files/glove.6B/glove.6B.50d.txt') as f: +with open('../large_files/glove.6B/glove.6B.50d.txt', encoding='utf-8') as f: # is just a space-separated text file in the format: # word vec[0] vec[1] vec[2] ... for line in f: From 4a52b07e163aa49c2b2a6ac3b9631d13ca1c3474 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 5 Jul 2018 13:26:47 -0400 Subject: [PATCH 061/329] update --- nlp_class2/bow_classifier.py | 146 +++++++++++++++++++++++++++++++++++ nlp_class2/pretrained_w2v.py | 82 ++++++++++++++++++++ 2 files changed, 228 insertions(+) create mode 100644 nlp_class2/bow_classifier.py create mode 100644 nlp_class2/pretrained_w2v.py diff --git a/nlp_class2/bow_classifier.py b/nlp_class2/bow_classifier.py new file mode 100644 index 00000000..60c1a92d --- /dev/null +++ b/nlp_class2/bow_classifier.py @@ -0,0 +1,146 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import sys +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier +from gensim.models import KeyedVectors + + +# data from https://www.cs.umb.edu/~smimarog/textmining/datasets/ +train = pd.read_csv('../large_files/r8-train-all-terms.txt', header=None, sep='\t') +test = pd.read_csv('../large_files/r8-test-all-terms.txt', header=None, sep='\t') +train.columns = ['label', 'content'] +test.columns = ['label', 'content'] + + + +class GloveVectorizer: + def __init__(self): + # load in pre-trained word vectors + print('Loading word vectors...') + word2vec = {} + embedding = [] + idx2word = [] + with open('../large_files/glove.6B/glove.6B.50d.txt') as f: + # is just a space-separated text file in the format: + # word vec[0] vec[1] vec[2] ... + for line in f: + values = line.split() + word = values[0] + vec = np.asarray(values[1:], dtype='float32') + word2vec[word] = vec + embedding.append(vec) + idx2word.append(word) + print('Found %s word vectors.' 
% len(word2vec)) + + # save for later + self.word2vec = word2vec + self.embedding = np.array(embedding) + self.word2idx = {v:k for k,v in enumerate(idx2word)} + self.V, self.D = self.embedding.shape + + def fit(self, data): + pass + + def transform(self, data): + X = np.zeros((len(data), self.D)) + n = 0 + emptycount = 0 + for sentence in data: + tokens = sentence.lower().split() + vecs = [] + for word in tokens: + if word in self.word2vec: + vec = self.word2vec[word] + vecs.append(vec) + if len(vecs) > 0: + vecs = np.array(vecs) + X[n] = vecs.mean(axis=0) + else: + emptycount += 1 + n += 1 + print("Numer of samples with no words found: %s / %s" % (emptycount, len(data))) + return X + + def fit_transform(self, data): + self.fit(data) + return self.transform(data) + + + + +class Word2VecVectorizer: + def __init__(self): + print("Loading in word vectors...") + self.word_vectors = KeyedVectors.load_word2vec_format( + '../large_files/GoogleNews-vectors-negative300.bin', + binary=True + ) + print("Finished loading in word vectors") + + def fit(self, data): + pass + + def transform(self, data): + # determine the dimensionality of vectors + v = self.word_vectors.get_vector('king') + self.D = v.shape[0] + + X = np.zeros((len(data), self.D)) + n = 0 + emptycount = 0 + for sentence in data: + tokens = sentence.split() + vecs = [] + m = 0 + for word in tokens: + try: + # throws KeyError if word not found + vec = self.word_vectors.get_vector(word) + vecs.append(vec) + m += 1 + except KeyError: + pass + if len(vecs) > 0: + vecs = np.array(vecs) + X[n] = vecs.mean(axis=0) + else: + emptycount += 1 + n += 1 + print("Numer of samples with no words found: %s / %s" % (emptycount, len(data))) + return X + + + def fit_transform(self, data): + self.fit(data) + return self.transform(data) + + + +vectorizer = GloveVectorizer() +# vectorizer = Word2VecVectorizer() +Xtrain = vectorizer.fit_transform(train.content) +Ytrain = train.label + +Xtest = vectorizer.transform(test.content) +Ytest = test.label + + + +# create the model, train it, print scores +model = RandomForestClassifier(n_estimators=200) +model.fit(Xtrain, Ytrain) +print("train score:", model.score(Xtrain, Ytrain)) +print("test score:", model.score(Xtest, Ytest)) + + diff --git a/nlp_class2/pretrained_w2v.py b/nlp_class2/pretrained_w2v.py new file mode 100644 index 00000000..730d3134 --- /dev/null +++ b/nlp_class2/pretrained_w2v.py @@ -0,0 +1,82 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +from gensim.models import KeyedVectors + + +# warning: takes quite awhile +# https://code.google.com/archive/p/word2vec/ +# direct link: https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing +# 3 million words and phrases +# D = 300 +word_vectors = KeyedVectors.load_word2vec_format( + '../large_files/GoogleNews-vectors-negative300.bin', + binary=True +) + + +# convenience +# result looks like: +# [('athens', 0.6001024842262268), +# ('albert', 0.5729557275772095), +# ('holmes', 0.569324254989624), +# ('donnie', 0.5690680742263794), +# ('italy', 0.5673537254333496), +# ('toni', 0.5666348338127136), +# ('spain', 0.5661854147911072), +# ('jh', 0.5661597847938538), +# ('pablo', 
0.5631559491157532), +# ('malta', 0.5620371103286743)] +def find_analogies(w1, w2, w3): + r = word_vectors.most_similar(positive=[w1, w3], negative=[w2]) + print("%s - %s = %s - %s" % (w1, w2, r[0][0], w3)) + +def nearest_neighbors(w): + r = word_vectors.most_similar(positive=[w]) + print("neighbors of: %s" % w) + for word, score in r: + print("\t%s" % word) + + +find_analogies('king', 'man', 'woman') +find_analogies('france', 'paris', 'london') +find_analogies('france', 'paris', 'rome') +find_analogies('paris', 'france', 'italy') +find_analogies('france', 'french', 'english') +find_analogies('japan', 'japanese', 'chinese') +find_analogies('japan', 'japanese', 'italian') +find_analogies('japan', 'japanese', 'australian') +find_analogies('december', 'november', 'june') +find_analogies('miami', 'florida', 'texas') +find_analogies('einstein', 'scientist', 'painter') +find_analogies('china', 'rice', 'bread') +find_analogies('man', 'woman', 'she') +find_analogies('man', 'woman', 'aunt') +find_analogies('man', 'woman', 'sister') +find_analogies('man', 'woman', 'wife') +find_analogies('man', 'woman', 'actress') +find_analogies('man', 'woman', 'mother') +find_analogies('heir', 'heiress', 'princess') +find_analogies('nephew', 'niece', 'aunt') +find_analogies('france', 'paris', 'tokyo') +find_analogies('france', 'paris', 'beijing') +find_analogies('february', 'january', 'november') +find_analogies('france', 'paris', 'rome') +find_analogies('paris', 'france', 'italy') + +nearest_neighbors('king') +nearest_neighbors('france') +nearest_neighbors('japan') +nearest_neighbors('einstein') +nearest_neighbors('woman') +nearest_neighbors('nephew') +nearest_neighbors('february') +nearest_neighbors('rome') \ No newline at end of file From 58b1d465f9edcaa0683b3df3e265a328dee1aa0e Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Thu, 12 Jul 2018 03:48:52 -0400 Subject: [PATCH 062/329] add gpu rnn units --- nlp_class3/attention.py | 4 ++++ nlp_class3/bilstm_mnist.py | 4 ++++ nlp_class3/bilstm_test.py | 5 +++++ nlp_class3/lstm_toxic.py | 5 +++++ nlp_class3/poetry.py | 5 +++++ nlp_class3/simple_rnn_test.py | 5 +++++ nlp_class3/wseq2seq.py | 5 +++++ 7 files changed, 33 insertions(+) diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index ae778880..6e427ac7 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -16,6 +16,10 @@ import numpy as np import matplotlib.pyplot as plt +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + # make sure we do softmax over the time axis # expected shape is N x T x D diff --git a/nlp_class3/bilstm_mnist.py b/nlp_class3/bilstm_mnist.py index a17e04dc..9a79b546 100644 --- a/nlp_class3/bilstm_mnist.py +++ b/nlp_class3/bilstm_mnist.py @@ -13,6 +13,10 @@ import pandas as pd import matplotlib.pyplot as plt +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + def get_mnist(limit=None): if not os.path.exists('../large_files'): diff --git a/nlp_class3/bilstm_test.py b/nlp_class3/bilstm_test.py index b6a1e181..800902f9 100644 --- a/nlp_class3/bilstm_test.py +++ b/nlp_class3/bilstm_test.py @@ -9,6 +9,11 @@ import numpy as np import matplotlib.pyplot as plt +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + T = 8 D = 2 diff --git a/nlp_class3/lstm_toxic.py 
b/nlp_class3/lstm_toxic.py index c232feea..8dc41f97 100644 --- a/nlp_class3/lstm_toxic.py +++ b/nlp_class3/lstm_toxic.py @@ -19,6 +19,11 @@ from keras.optimizers import Adam from sklearn.metrics import roc_auc_score +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + # Download the data: # https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge diff --git a/nlp_class3/poetry.py b/nlp_class3/poetry.py index 123df4b9..5b3cfac7 100644 --- a/nlp_class3/poetry.py +++ b/nlp_class3/poetry.py @@ -17,6 +17,11 @@ from keras.preprocessing.sequence import pad_sequences from keras.optimizers import Adam, SGD +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + # some configuration MAX_SEQUENCE_LENGTH = 100 diff --git a/nlp_class3/simple_rnn_test.py b/nlp_class3/simple_rnn_test.py index d7e13a07..b67a4b29 100644 --- a/nlp_class3/simple_rnn_test.py +++ b/nlp_class3/simple_rnn_test.py @@ -9,6 +9,11 @@ import numpy as np import matplotlib.pyplot as plt +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + T = 8 D = 2 diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index 7f6c8a9f..5f1ea9ac 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -15,6 +15,11 @@ import numpy as np import matplotlib.pyplot as plt +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + # some config BATCH_SIZE = 64 # Batch size for training. From 8d31f5a28f406603ba7bce1fc1eb1139b0eb1035 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Thu, 12 Jul 2018 04:07:06 -0400 Subject: [PATCH 063/329] take away dropout --- nlp_class3/attention.py | 6 +++++- nlp_class3/wseq2seq.py | 13 +++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index 6e427ac7..d7e9c809 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -202,7 +202,11 @@ def softmax_over_time(x): # Set up the encoder - simple! 
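Because the CuDNN recurrent layers used on GPU do not accept the dropout or recurrent_dropout arguments (which is why the hunks below comment them out), one common workaround is to apply ordinary Dropout to the recurrent layer's outputs instead. A minimal sketch with made-up sizes, assuming a GPU-enabled Keras/TensorFlow backend rather than this script's variables:

from keras.models import Model
from keras.layers import Input, Dropout, Bidirectional, CuDNNLSTM

T, D, M = 10, 50, 64            # hypothetical sequence length, input dim, units
inp = Input(shape=(T, D))
h = Bidirectional(CuDNNLSTM(M, return_sequences=True))(inp)
h = Dropout(0.5)(h)             # dropout on the outputs instead of inside the cell
model = Model(inp, h)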
encoder_inputs_placeholder = Input(shape=(max_len_input,)) x = embedding_layer(encoder_inputs_placeholder) -encoder = Bidirectional(LSTM(LATENT_DIM, return_sequences=True, dropout=0.5)) +encoder = Bidirectional(LSTM( + LATENT_DIM, + return_sequences=True, + # dropout=0.5 # dropout not available on gpu +)) encoder_outputs = encoder(x) diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index 5f1ea9ac..384dbf46 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -174,7 +174,11 @@ ##### build the model ##### encoder_inputs_placeholder = Input(shape=(max_len_input,)) x = embedding_layer(encoder_inputs_placeholder) -encoder = LSTM(LATENT_DIM, return_state=True, dropout=0.5) +encoder = LSTM( + LATENT_DIM, + return_state=True, + # dropout=0.5 # dropout not available on gpu +) encoder_outputs, h, c = encoder(x) # encoder_outputs, h = encoder(x) #gru @@ -192,7 +196,12 @@ # since the decoder is a "to-many" model we want to have # return_sequences=True -decoder_lstm = LSTM(LATENT_DIM, return_sequences=True, return_state=True, dropout=0.5) +decoder_lstm = LSTM( + LATENT_DIM, + return_sequences=True, + return_state=True, + # dropout=0.5 # dropout not available on gpu +) decoder_outputs, _, _ = decoder_lstm( decoder_inputs_x, initial_state=encoder_states From 158b5ac4f47f306bec1ba385ca6aa53690dad335 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 15 Jul 2018 21:38:18 -0400 Subject: [PATCH 064/329] update --- ann_class2/tensorflow2.py | 2 +- cnn_class/benchmark.py | 31 ++-- cnn_class/cnn_tf.py | 57 ++++++- cnn_class/cnn_tf_plot_filters.py | 214 +++++++++++++++++++++++++++ cnn_class/cnn_theano_plot_filters.py | 2 +- cnn_class/edge_benchmark.py | 12 +- 6 files changed, 282 insertions(+), 36 deletions(-) create mode 100644 cnn_class/cnn_tf_plot_filters.py diff --git a/ann_class2/tensorflow2.py b/ann_class2/tensorflow2.py index 458cb7f8..54959b0f 100644 --- a/ann_class2/tensorflow2.py +++ b/ann_class2/tensorflow2.py @@ -74,7 +74,7 @@ def main(): # softmax_cross_entropy_with_logits take in the "logits" # if you wanted to know the actual output of the neural net, # you could pass "Yish" into tf.nn.softmax(logits) - cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=Yish, labels=T)) + cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=Yish, labels=T)) # we choose the optimizer but don't implement the algorithm ourselves # let's go with RMSprop, since we just learned about it. diff --git a/cnn_class/benchmark.py b/cnn_class/benchmark.py index ecc89fa3..75c526dd 100644 --- a/cnn_class/benchmark.py +++ b/cnn_class/benchmark.py @@ -17,14 +17,6 @@ from datetime import datetime -def y2indicator(y): - N = len(y) - ind = np.zeros((N, 10)) - for i in range(N): - ind[i, y[i]] = 1 - return ind - - def error_rate(p, t): return np.mean(p != t) @@ -76,21 +68,19 @@ def main(): # Y is a N x 1 matrix with values 1..10 (MATLAB indexes by 1) # So flatten it and make it 0..9 # Also need indicator matrix for cost calculation - Xtrain = flatten(train['X'].astype(np.float32) / 255) + Xtrain = flatten(train['X'].astype(np.float32) / 255.) Ytrain = train['y'].flatten() - 1 Xtrain, Ytrain = shuffle(Xtrain, Ytrain) - Ytrain_ind = y2indicator(Ytrain) - Xtest = flatten(test['X'].astype(np.float32) / 255) + Xtest = flatten(test['X'].astype(np.float32) / 255.) 
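This patch drops the one-hot indicator matrices in favour of integer labels with sparse_softmax_cross_entropy_with_logits; the two losses agree when the labels encode the same classes, as this standalone TensorFlow 1.x check illustrates (not part of benchmark.py):

import numpy as np
import tensorflow as tf   # assumes TensorFlow 1.x

logits = tf.constant(np.random.randn(4, 3), dtype=tf.float32)
labels = tf.constant([0, 2, 1, 2])
onehot = tf.one_hot(labels, depth=3)

sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
dense = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=onehot)

with tf.Session() as sess:
    a, b = sess.run([sparse, dense])
    print(np.allclose(a, b))   # True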
Ytest = test['y'].flatten() - 1 - Ytest_ind = y2indicator(Ytest) # gradient descent params max_iter = 20 print_period = 10 N, D = Xtrain.shape batch_sz = 500 - n_batches = N / batch_sz + n_batches = N // batch_sz # initial weights M1 = 1000 # hidden layer size @@ -105,7 +95,7 @@ def main(): # define variables and expressions X = tf.placeholder(tf.float32, shape=(None, D), name='X') - T = tf.placeholder(tf.float32, shape=(None, K), name='T') + T = tf.placeholder(tf.int32, shape=(None,), name='T') W1 = tf.Variable(W1_init.astype(np.float32)) b1 = tf.Variable(b1_init.astype(np.float32)) W2 = tf.Variable(W2_init.astype(np.float32)) @@ -115,16 +105,19 @@ def main(): Z1 = tf.nn.relu( tf.matmul(X, W1) + b1 ) Z2 = tf.nn.relu( tf.matmul(Z1, W2) + b2 ) - Yish = tf.matmul(Z2, W3) + b3 + logits = tf.matmul(Z2, W3) + b3 cost = tf.reduce_sum( - tf.nn.softmax_cross_entropy_with_logits(logits=Yish, labels=T) + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, + labels=T + ) ) train_op = tf.train.RMSPropOptimizer(0.0001, decay=0.99, momentum=0.9).minimize(cost) # we'll use this to calculate the error rate - predict_op = tf.argmax(Yish, 1) + predict_op = tf.argmax(logits, 1) t0 = datetime.now() LL = [] @@ -135,11 +128,11 @@ def main(): for i in range(max_iter): for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] - Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] + Ybatch = Ytrain[j*batch_sz:(j*batch_sz + batch_sz),] session.run(train_op, feed_dict={X: Xbatch, T: Ybatch}) if j % print_period == 0: - test_cost = session.run(cost, feed_dict={X: Xtest, T: Ytest_ind}) + test_cost = session.run(cost, feed_dict={X: Xtest, T: Ytest}) prediction = session.run(predict_op, feed_dict={X: Xtest}) err = error_rate(prediction, Ytest) print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, test_cost, err)) diff --git a/cnn_class/cnn_tf.py b/cnn_class/cnn_tf.py index a152bbb1..ec768d75 100644 --- a/cnn_class/cnn_tf.py +++ b/cnn_class/cnn_tf.py @@ -20,7 +20,7 @@ from scipy.io import loadmat from sklearn.utils import shuffle -from benchmark import get_data, y2indicator, error_rate +from benchmark import get_data, error_rate def convpool(X, W, b): @@ -61,12 +61,10 @@ def main(): # print len(Ytrain) del train Xtrain, Ytrain = shuffle(Xtrain, Ytrain) - Ytrain_ind = y2indicator(Ytrain) Xtest = rearrange(test['X']) Ytest = test['y'].flatten() - 1 del test - Ytest_ind = y2indicator(Ytest) # gradient descent params max_iter = 6 @@ -81,7 +79,6 @@ def main(): Ytrain = Ytrain[:73000] Xtest = Xtest[:26000,] Ytest = Ytest[:26000] - Ytest_ind = Ytest_ind[:26000,] # print "Xtest.shape:", Xtest.shape # print "Ytest.shape:", Ytest.shape @@ -108,7 +105,7 @@ def main(): # define variables and expressions # using None as the first shape element takes up too much RAM unfortunately X = tf.placeholder(tf.float32, shape=(batch_sz, 32, 32, 3), name='X') - T = tf.placeholder(tf.float32, shape=(batch_sz, K), name='T') + T = tf.placeholder(tf.int32, shape=(batch_sz,), name='T') W1 = tf.Variable(W1_init.astype(np.float32)) b1 = tf.Variable(b1_init.astype(np.float32)) W2 = tf.Variable(W2_init.astype(np.float32)) @@ -126,7 +123,7 @@ def main(): Yish = tf.matmul(Z3, W4) + b4 cost = tf.reduce_sum( - tf.nn.softmax_cross_entropy_with_logits( + tf.nn.sparse_softmax_cross_entropy_with_logits( logits=Yish, labels=T ) @@ -139,6 +136,8 @@ def main(): t0 = datetime.now() LL = [] + W1_val = None + W2_val = None init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) @@ 
-146,7 +145,7 @@ def main(): for i in range(max_iter): for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] - Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] + Ybatch = Ytrain[j*batch_sz:(j*batch_sz + batch_sz),] if len(Xbatch) == batch_sz: session.run(train_op, feed_dict={X: Xbatch, T: Ybatch}) @@ -157,17 +156,59 @@ def main(): prediction = np.zeros(len(Xtest)) for k in range(len(Xtest) // batch_sz): Xtestbatch = Xtest[k*batch_sz:(k*batch_sz + batch_sz),] - Ytestbatch = Ytest_ind[k*batch_sz:(k*batch_sz + batch_sz),] + Ytestbatch = Ytest[k*batch_sz:(k*batch_sz + batch_sz),] test_cost += session.run(cost, feed_dict={X: Xtestbatch, T: Ytestbatch}) prediction[k*batch_sz:(k*batch_sz + batch_sz)] = session.run( predict_op, feed_dict={X: Xtestbatch}) err = error_rate(prediction, Ytest) print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, test_cost, err)) LL.append(test_cost) + + W1_val = W1.eval() + W2_val = W2.eval() print("Elapsed time:", (datetime.now() - t0)) plt.plot(LL) plt.show() + W1_val = W1_val.transpose(3, 2, 0, 1) + W2_val = W2_val.transpose(3, 2, 0, 1) + + + # visualize W1 (20, 3, 5, 5) + # W1_val = W1.get_value() + grid = np.zeros((8*5, 8*5)) + m = 0 + n = 0 + for i in range(20): + for j in range(3): + filt = W1_val[i,j] + grid[m*5:(m+1)*5,n*5:(n+1)*5] = filt + m += 1 + if m >= 8: + m = 0 + n += 1 + plt.imshow(grid, cmap='gray') + plt.title("W1") + plt.show() + + # visualize W2 (50, 20, 5, 5) + # W2_val = W2.get_value() + grid = np.zeros((32*5, 32*5)) + m = 0 + n = 0 + for i in range(50): + for j in range(20): + filt = W2_val[i,j] + grid[m*5:(m+1)*5,n*5:(n+1)*5] = filt + m += 1 + if m >= 32: + m = 0 + n += 1 + plt.imshow(grid, cmap='gray') + plt.title("W2") + plt.show() + + if __name__ == '__main__': main() diff --git a/cnn_class/cnn_tf_plot_filters.py b/cnn_class/cnn_tf_plot_filters.py new file mode 100644 index 00000000..8be13efe --- /dev/null +++ b/cnn_class/cnn_tf_plot_filters.py @@ -0,0 +1,214 @@ +# New concepts and differences from Theano: +# - stride is the interval at which to apply the convolution +# - unlike previous course, we use constant-size input to the network +# since not doing that caused us to start swapping +# - the output after convpool is a different size (8,8) here, (5,5) in Theano + +# https://deeplearningcourses.com/c/deep-learning-convolutional-neural-networks-theano-tensorflow +# https://udemy.com/deep-learning-convolutional-neural-networks-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import tensorflow as tf +import matplotlib.pyplot as plt + +from datetime import datetime +from scipy.signal import convolve2d +from scipy.io import loadmat +from sklearn.utils import shuffle + +from benchmark import get_data, error_rate + + +def convpool(X, W, b): + # just assume pool size is (2,2) because we need to augment it with 1s + conv_out = tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME') + conv_out = tf.nn.bias_add(conv_out, b) + pool_out = tf.nn.max_pool(conv_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') + return tf.nn.relu(pool_out) + + +def init_filter(shape, poolsz): + # w = np.random.randn(*shape) * np.sqrt(2) / np.sqrt(np.prod(shape[:-1]) + shape[-1]*np.prod(shape[:-2]) / np.prod(poolsz)) + w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[:-1])) + return w.astype(np.float32) + + +def rearrange(X): + # input is (32, 32, 3, 
N) + # output is (N, 32, 32, 3) + # N = X.shape[-1] + # out = np.zeros((N, 32, 32, 3), dtype=np.float32) + # for i in xrange(N): + # for j in xrange(3): + # out[i, :, :, j] = X[:, :, j, i] + # return out / 255 + return (X.transpose(3, 0, 1, 2) / 255).astype(np.float32) + + + +train, test = get_data() + +# Need to scale! don't leave as 0..255 +# Y is a N x 1 matrix with values 1..10 (MATLAB indexes by 1) +# So flatten it and make it 0..9 +# Also need indicator matrix for cost calculation +Xtrain = rearrange(train['X']) +Ytrain = train['y'].flatten() - 1 +# print len(Ytrain) +del train +Xtrain, Ytrain = shuffle(Xtrain, Ytrain) + +Xtest = rearrange(test['X']) +Ytest = test['y'].flatten() - 1 +del test + +# gradient descent params +max_iter = 6 +print_period = 10 +N = Xtrain.shape[0] +batch_sz = 500 +n_batches = N // batch_sz + +# limit samples since input will always have to be same size +# you could also just do N = N / batch_sz * batch_sz +Xtrain = Xtrain[:73000,] +Ytrain = Ytrain[:73000] +Xtest = Xtest[:26000,] +Ytest = Ytest[:26000] +# print "Xtest.shape:", Xtest.shape +# print "Ytest.shape:", Ytest.shape + +# initial weights +M = 500 +K = 10 +poolsz = (2, 2) + +W1_shape = (5, 5, 3, 20) # (filter_width, filter_height, num_color_channels, num_feature_maps) +W1_init = init_filter(W1_shape, poolsz) +b1_init = np.zeros(W1_shape[-1], dtype=np.float32) # one bias per output feature map + +W2_shape = (5, 5, 20, 50) # (filter_width, filter_height, old_num_feature_maps, num_feature_maps) +W2_init = init_filter(W2_shape, poolsz) +b2_init = np.zeros(W2_shape[-1], dtype=np.float32) + +# vanilla ANN weights +W3_init = np.random.randn(W2_shape[-1]*8*8, M) / np.sqrt(W2_shape[-1]*8*8 + M) +b3_init = np.zeros(M, dtype=np.float32) +W4_init = np.random.randn(M, K) / np.sqrt(M + K) +b4_init = np.zeros(K, dtype=np.float32) + + +# define variables and expressions +# using None as the first shape element takes up too much RAM unfortunately +X = tf.placeholder(tf.float32, shape=(batch_sz, 32, 32, 3), name='X') +T = tf.placeholder(tf.int32, shape=(batch_sz,), name='T') +W1 = tf.Variable(W1_init.astype(np.float32)) +b1 = tf.Variable(b1_init.astype(np.float32)) +W2 = tf.Variable(W2_init.astype(np.float32)) +b2 = tf.Variable(b2_init.astype(np.float32)) +W3 = tf.Variable(W3_init.astype(np.float32)) +b3 = tf.Variable(b3_init.astype(np.float32)) +W4 = tf.Variable(W4_init.astype(np.float32)) +b4 = tf.Variable(b4_init.astype(np.float32)) +params = [W1, b1, W2, b2, W3, b3, W4, b4] + +Z1 = convpool(X, W1, b1) +Z2 = convpool(Z1, W2, b2) +Z2_shape = Z2.get_shape().as_list() +Z2r = tf.reshape(Z2, [Z2_shape[0], np.prod(Z2_shape[1:])]) +Z3 = tf.nn.relu( tf.matmul(Z2r, W3) + b3 ) +Yish = tf.matmul(Z3, W4) + b4 + +cost = tf.reduce_sum( + tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=Yish, + labels=T + ) +) + 0.01*sum(tf.reduce_sum(p*p) for p in params) + +train_op = tf.train.RMSPropOptimizer(0.0001, decay=0.99, momentum=0.9).minimize(cost) +# train_op = tf.train.MomentumOptimizer(0.00001, momentum=0.99).minimize(cost) + +# we'll use this to calculate the error rate +predict_op = tf.argmax(Yish, 1) + +t0 = datetime.now() +LL = [] +W1_val = None +W2_val = None +init = tf.global_variables_initializer() +with tf.Session() as session: + session.run(init) + + for i in range(max_iter): + for j in range(n_batches): + Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] + Ybatch = Ytrain[j*batch_sz:(j*batch_sz + batch_sz),] + + if len(Xbatch) == batch_sz: + session.run(train_op, feed_dict={X: Xbatch, T: Ybatch}) + if j % 
print_period == 0: + # due to RAM limitations we need to have a fixed size input + # so as a result, we have this ugly total cost and prediction computation + test_cost = 0 + prediction = np.zeros(len(Xtest)) + for k in range(len(Xtest) // batch_sz): + Xtestbatch = Xtest[k*batch_sz:(k*batch_sz + batch_sz),] + Ytestbatch = Ytest[k*batch_sz:(k*batch_sz + batch_sz),] + test_cost += session.run(cost, feed_dict={X: Xtestbatch, T: Ytestbatch}) + prediction[k*batch_sz:(k*batch_sz + batch_sz)] = session.run( + predict_op, feed_dict={X: Xtestbatch}) + err = error_rate(prediction, Ytest) + print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, test_cost, err)) + LL.append(test_cost) + + W1_val = W1.eval() + W2_val = W2.eval() +print("Elapsed time:", (datetime.now() - t0)) +plt.plot(LL) +plt.show() + + +W1_val = W1_val.transpose(3, 2, 0, 1) +W2_val = W2_val.transpose(3, 2, 0, 1) + + +# visualize W1 (20, 3, 5, 5) +# W1_val = W1.get_value() +grid = np.zeros((8*5, 8*5)) +m = 0 +n = 0 +for i in range(20): + for j in range(3): + filt = W1_val[i,j] + grid[m*5:(m+1)*5,n*5:(n+1)*5] = filt + m += 1 + if m >= 8: + m = 0 + n += 1 +plt.imshow(grid, cmap='gray') +plt.title("W1") +plt.show() + +# visualize W2 (50, 20, 5, 5) +# W2_val = W2.get_value() +grid = np.zeros((32*5, 32*5)) +m = 0 +n = 0 +for i in range(50): + for j in range(20): + filt = W2_val[i,j] + grid[m*5:(m+1)*5,n*5:(n+1)*5] = filt + m += 1 + if m >= 32: + m = 0 + n += 1 +plt.imshow(grid, cmap='gray') +plt.title("W2") +plt.show() + + diff --git a/cnn_class/cnn_theano_plot_filters.py b/cnn_class/cnn_theano_plot_filters.py index 9b385640..931a4754 100644 --- a/cnn_class/cnn_theano_plot_filters.py +++ b/cnn_class/cnn_theano_plot_filters.py @@ -103,7 +103,7 @@ def main(): # define the cost function and prediction params = (W1, b1, W2, b2, W3, b3, W4, b4) - reg_cost = reg*np.sum((param*param).sum() for param in params) + reg_cost = reg*sum((param*param).sum() for param in params) cost = -(Y * T.log(pY)).sum() + reg_cost prediction = T.argmax(pY, axis=1) diff --git a/cnn_class/edge_benchmark.py b/cnn_class/edge_benchmark.py index 0a4e26a0..de605634 100644 --- a/cnn_class/edge_benchmark.py +++ b/cnn_class/edge_benchmark.py @@ -14,7 +14,7 @@ from scipy.io import loadmat from sklearn.utils import shuffle -from benchmark import y2indicator, error_rate +from benchmark import error_rate Hx = np.array([ @@ -58,14 +58,12 @@ def main(): Xtrain = convolve_flatten(train['X'].astype(np.float32)) Ytrain = train['y'].flatten() - 1 Xtrain, Ytrain = shuffle(Xtrain, Ytrain) - Ytrain_ind = y2indicator(Ytrain) Xtest = convolve_flatten(test['X'].astype(np.float32)) Ytest = test['y'].flatten() - 1 - Ytest_ind = y2indicator(Ytest) # gradient descent params - max_iter = 20 + max_iter = 6 print_period = 10 N, D = Xtrain.shape batch_sz = 500 @@ -97,7 +95,7 @@ def main(): Yish = tf.matmul(Z2, W3) + b3 cost = tf.reduce_sum( - tf.nn.softmax_cross_entropy_with_logits( + tf.nn.sparse_softmax_cross_entropy_with_logits( logits=Yish, labels=T ) @@ -116,11 +114,11 @@ def main(): for i in range(max_iter): for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] - Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] + Ybatch = Ytrain[j*batch_sz:(j*batch_sz + batch_sz),] session.run(train_op, feed_dict={X: Xbatch, T: Ybatch}) if j % print_period == 0: - test_cost = session.run(cost, feed_dict={X: Xtest, T: Ytest_ind}) + test_cost = session.run(cost, feed_dict={X: Xtest, T: Ytest}) prediction = session.run(predict_op, feed_dict={X: Xtest}) err = 
error_rate(prediction, Ytest) print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, test_cost, err)) From ae7925dda84eff6da006c29231161d37a4dfb3ed Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 16 Jul 2018 02:55:27 -0400 Subject: [PATCH 065/329] update --- ann_class2/adam.py | 6 +- ann_class2/batch_norm_tf.py | 6 +- ann_class2/batch_norm_theano.py | 6 +- ann_class2/cntk_example.py | 15 ++-- ann_class2/dropout_tensorflow.py | 15 ++-- ann_class2/dropout_theano.py | 14 ++-- ann_class2/keras_example.py | 11 +-- ann_class2/momentum.py | 8 +-- ann_class2/mxnet_example.py | 13 +--- ann_class2/pytorch_example.py | 12 +--- ann_class2/pytorch_example2.py | 12 +--- ann_class2/rmsprop.py | 6 +- ann_class2/sgd.py | 34 ++++----- ann_class2/tensorflow2.py | 8 +-- ann_class2/tf_with_save.py | 12 ++-- ann_class2/theano2.py | 18 ++--- ann_class2/theano_gpu.py | 114 ------------------------------- ann_class2/util.py | 87 ++++++++++++----------- 18 files changed, 110 insertions(+), 287 deletions(-) delete mode 100644 ann_class2/theano_gpu.py diff --git a/ann_class2/adam.py b/ann_class2/adam.py index 182b7fcc..1ae7813a 100644 --- a/ann_class2/adam.py +++ b/ann_class2/adam.py @@ -19,13 +19,9 @@ def main(): max_iter = 10 print_period = 10 - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() reg = 0.01 - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] Ytrain_ind = y2indicator(Ytrain) Ytest_ind = y2indicator(Ytest) diff --git a/ann_class2/batch_norm_tf.py b/ann_class2/batch_norm_tf.py index 719a5506..de25cd3b 100644 --- a/ann_class2/batch_norm_tf.py +++ b/ann_class2/batch_norm_tf.py @@ -183,11 +183,7 @@ def predict(self, X): def main(): # step 1: get the data and define all the usual variables - X, Y = get_normalized_data() - # Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.03) - X, Y = shuffle(X, Y) - Xtrain, Ytrain = X[:-1000], Y[:-1000] - Xtest, Ytest = X[-1000:], Y[-1000:] + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() ann = ANN([500, 300]) diff --git a/ann_class2/batch_norm_theano.py b/ann_class2/batch_norm_theano.py index ca40d10d..cfb9d999 100644 --- a/ann_class2/batch_norm_theano.py +++ b/ann_class2/batch_norm_theano.py @@ -202,11 +202,7 @@ def score(self, X, Y): def main(): # step 1: get the data and define all the usual variables - X, Y = get_normalized_data() - # Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.3) - X, Y = shuffle(X, Y) - Xtrain, Ytrain = X[:-1000], Y[:-1000] - Xtest, Ytest = X[-1000:], Y[-1000:] + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() ann = ANN([500, 300]) ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True) diff --git a/ann_class2/cntk_example.py b/ann_class2/cntk_example.py index 6a7abcd4..29564592 100644 --- a/ann_class2/cntk_example.py +++ b/ann_class2/cntk_example.py @@ -37,22 +37,21 @@ # get the data, same as Theano + Tensorflow examples -X, Y = get_normalized_data() +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() # get shapes -N, D = X.shape -K = len(set(Y)) +N, D = Xtrain.shape +K = len(set(Ytrain)) # we want one-hot encoded labels -Y = y2indicator(Y) +Ytrain = y2indicator(Ytrain) +Ytest = y2indicator(Ytest) # split the data X = X.astype(np.float32) Y = Y.astype(np.float32) -Xtrain = X[:-1000,] -Ytrain = Y[:-1000] -Xtest = X[-1000:,] -Ytest = Y[-1000:] +Xtest = Xtest.astype(np.float32) +Ytest = Ytest.astype(np.float32) # the model will be a sequence of layers diff --git a/ann_class2/dropout_tensorflow.py b/ann_class2/dropout_tensorflow.py 
index 76b8bac8..b20c44fb 100644 --- a/ann_class2/dropout_tensorflow.py +++ b/ann_class2/dropout_tensorflow.py @@ -33,16 +33,11 @@ def __init__(self, hidden_layer_sizes, p_keep): self.hidden_layer_sizes = hidden_layer_sizes self.dropout_rates = p_keep - def fit(self, X, Y, lr=1e-4, mu=0.9, decay=0.9, epochs=15, batch_sz=100, split=True, print_every=20): - # make a validation set - X, Y = shuffle(X, Y) + def fit(self, X, Y, Xvalid, Yvalid, lr=1e-4, mu=0.9, decay=0.9, epochs=15, batch_sz=100, print_every=50): X = X.astype(np.float32) Y = Y.astype(np.int64) - if split: - Xvalid, Yvalid = X[-1000:], Y[-1000:] - X, Y = X[:-1000], Y[:-1000] - else: - Xvalid, Yvalid = X, Y + Xvalid = Xvalid.astype(np.float32) + Yvalid = Yvalid.astype(np.int64) # initialize hidden layers N, D = X.shape @@ -143,10 +138,10 @@ def relu(a): def main(): # step 1: get the data and define all the usual variables - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() ann = ANN([500, 300], [0.8, 0.5, 0.5]) - ann.fit(X, Y) + ann.fit(Xtrain, Ytrain, Xtest, Ytest) if __name__ == '__main__': diff --git a/ann_class2/dropout_theano.py b/ann_class2/dropout_theano.py index a84a69d8..81e5bebe 100644 --- a/ann_class2/dropout_theano.py +++ b/ann_class2/dropout_theano.py @@ -39,13 +39,11 @@ def __init__(self, hidden_layer_sizes, p_keep): self.hidden_layer_sizes = hidden_layer_sizes self.dropout_rates = p_keep - def fit(self, X, Y, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=100, show_fig=False): - # make a validation set - X, Y = shuffle(X, Y) + def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=100, show_fig=False): X = X.astype(np.float32) Y = Y.astype(np.int32) - Xvalid, Yvalid = X[-1000:], Y[-1000:] - X, Y = X[:-1000], Y[:-1000] + Xvalid = Xvalid.astype(np.float32) + Yvalid = Yvalid.astype(np.int32) self.rng = RandomStreams() @@ -125,7 +123,7 @@ def fit(self, X, Y, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=10 train_op(Xbatch, Ybatch) - if j % 20 == 0: + if j % 50 == 0: c, p = cost_predict_op(Xvalid, Yvalid) costs.append(c) e = error_rate(Yvalid, p) @@ -166,10 +164,10 @@ def relu(a): def main(): # step 1: get the data and define all the usual variables - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() ann = ANN([500, 300], [0.8, 0.5, 0.5]) - ann.fit(X, Y, show_fig=True) + ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True) if __name__ == '__main__': diff --git a/ann_class2/keras_example.py b/ann_class2/keras_example.py index d93b1108..0fb28736 100644 --- a/ann_class2/keras_example.py +++ b/ann_class2/keras_example.py @@ -19,17 +19,18 @@ # get the data, same as Theano + Tensorflow examples # no need to split now, the fit() function will do it -X, Y = get_normalized_data() +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() # get shapes -N, D = X.shape -K = len(set(Y)) +N, D = Xtrain.shape +K = len(set(Ytrain)) # by default Keras wants one-hot encoded labels # there's another cost function we can use # where we can just pass in the integer labels directly # just like Tensorflow / Theano -Y = y2indicator(Y) +Ytrain = y2indicator(Ytrain) +Ytest = y2indicator(Ytest) # the model will be a sequence of layers @@ -60,7 +61,7 @@ # gives us back a -r = model.fit(X, Y, validation_split=0.33, epochs=15, batch_size=32) +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) print("Returned:", r) # print the available keys diff --git a/ann_class2/momentum.py 
b/ann_class2/momentum.py index 14b8c735..5df6fa30 100644 --- a/ann_class2/momentum.py +++ b/ann_class2/momentum.py @@ -26,16 +26,12 @@ def main(): # 3. batch SGD with Nesterov momentum max_iter = 20 # make it 30 for sigmoid - print_period = 10 + print_period = 50 - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() lr = 0.00004 reg = 0.01 - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] Ytrain_ind = y2indicator(Ytrain) Ytest_ind = y2indicator(Ytest) diff --git a/ann_class2/mxnet_example.py b/ann_class2/mxnet_example.py index f716e6f4..9ea745fc 100644 --- a/ann_class2/mxnet_example.py +++ b/ann_class2/mxnet_example.py @@ -27,18 +27,11 @@ # get the data, same as Theano + Tensorflow examples # no need to split now, the fit() function will do it -X, Y = get_normalized_data() +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() # get shapes -N, D = X.shape -K = len(set(Y)) - -# split the data -Xtrain = X[:-1000,] -Ytrain = Y[:-1000] -Xtest = X[-1000:,] -Ytest = Y[-1000:] - +N, D = Xtrain.shape +K = len(set(Ytrain)) # training config batch_size = 32 diff --git a/ann_class2/pytorch_example.py b/ann_class2/pytorch_example.py index 68010dd0..bd6af2a8 100644 --- a/ann_class2/pytorch_example.py +++ b/ann_class2/pytorch_example.py @@ -26,17 +26,11 @@ # get the data, same as Theano + Tensorflow examples # no need to split now, the fit() function will do it -X, Y = get_normalized_data() +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() # get shapes -_, D = X.shape -K = len(set(Y)) - -# split the data -Xtrain = X[:-1000,] -Ytrain = Y[:-1000] -Xtest = X[-1000:,] -Ytest = Y[-1000:] +_, D = Xtrain.shape +K = len(set(Ytrain)) # Note: no need to convert Y to indicator matrix diff --git a/ann_class2/pytorch_example2.py b/ann_class2/pytorch_example2.py index 40a638e2..9822cd3e 100644 --- a/ann_class2/pytorch_example2.py +++ b/ann_class2/pytorch_example2.py @@ -26,17 +26,11 @@ # get the data, same as Theano + Tensorflow examples # no need to split now, the fit() function will do it -X, Y = get_normalized_data() +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() # get shapes -_, D = X.shape -K = len(set(Y)) - -# split the data -Xtrain = X[:-1000,] -Ytrain = Y[:-1000] -Xtest = X[-1000:,] -Ytest = Y[-1000:] +_, D = Xtrain.shape +K = len(set(Ytrain)) # Note: no need to convert Y to indicator matrix diff --git a/ann_class2/rmsprop.py b/ann_class2/rmsprop.py index 27ee5941..518d0b98 100644 --- a/ann_class2/rmsprop.py +++ b/ann_class2/rmsprop.py @@ -19,14 +19,10 @@ def main(): max_iter = 20 # make it 30 for sigmoid print_period = 10 - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() lr = 0.00004 reg = 0.01 - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] Ytrain_ind = y2indicator(Ytrain) Ytest_ind = y2indicator(Ytest) diff --git a/ann_class2/sgd.py b/ann_class2/sgd.py index eb7bf529..abc77e27 100644 --- a/ann_class2/sgd.py +++ b/ann_class2/sgd.py @@ -28,19 +28,8 @@ def main(): - X, Y, _, _ = get_transformed_data() - X = X[:, :300] - - # normalize X first - mu = X.mean(axis=0) - std = X.std(axis=0) - X = (X - mu) / std - + Xtrain, Xtest, Ytrain, Ytest = get_transformed_data() print("Performing logistic regression...") - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] N, D = Xtrain.shape Ytrain_ind = y2indicator(Ytrain) @@ -53,7 +42,7 @@ def main(): lr = 0.0001 reg = 0.01 t0 = datetime.now() - for i in range(200): + for i in range(50): p_y = 
forward(Xtrain, W, b) W += lr*(gradW(Ytrain_ind, p_y, Xtrain) - reg*W) @@ -63,10 +52,11 @@ def main(): p_y_test = forward(Xtest, W, b) ll = cost(p_y_test, Ytest_ind) LL.append(ll) - if i % 10 == 0: + if i % 1 == 0: err = error_rate(p_y_test, Ytest) - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + if i % 10 == 0: + print("Cost at iteration %d: %.6f" % (i, ll)) + print("Error rate:", err) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) print("Elapsted time for full GD:", datetime.now() - t0) @@ -80,7 +70,7 @@ def main(): reg = 0.01 t0 = datetime.now() - for i in range(1): # takes very long since we're computing cost for 41k samples + for i in range(50): # takes very long since we're computing cost for 41k samples tmpX, tmpY = shuffle(Xtrain, Ytrain_ind) for n in range(min(N, 500)): # shortcut so it won't take so long... x = tmpX[n,:].reshape(1,D) @@ -94,8 +84,9 @@ def main(): ll = cost(p_y_test, Ytest_ind) LL_stochastic.append(ll) - if n % (N//2) == 0: - err = error_rate(p_y_test, Ytest) + if i % 1 == 0: + err = error_rate(p_y_test, Ytest) + if i % 10 == 0: print("Cost at iteration %d: %.6f" % (i, ll)) print("Error rate:", err) p_y = forward(Xtest, W, b) @@ -126,8 +117,9 @@ def main(): p_y_test = forward(Xtest, W, b) ll = cost(p_y_test, Ytest_ind) LL_batch.append(ll) - if j % (n_batches//2) == 0: - err = error_rate(p_y_test, Ytest) + if i % 1 == 0: + err = error_rate(p_y_test, Ytest) + if i % 10 == 0: print("Cost at iteration %d: %.6f" % (i, ll)) print("Error rate:", err) p_y = forward(Xtest, W, b) diff --git a/ann_class2/tensorflow2.py b/ann_class2/tensorflow2.py index 54959b0f..6641f5b0 100644 --- a/ann_class2/tensorflow2.py +++ b/ann_class2/tensorflow2.py @@ -25,18 +25,14 @@ def error_rate(p, t): # copy this first part from theano2.py def main(): # step 1: get the data and define all the usual variables - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() max_iter = 15 - print_period = 10 + print_period = 50 lr = 0.00004 reg = 0.01 - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] Ytrain_ind = y2indicator(Ytrain) Ytest_ind = y2indicator(Ytest) diff --git a/ann_class2/tf_with_save.py b/ann_class2/tf_with_save.py index 68f41481..43e20503 100644 --- a/ann_class2/tf_with_save.py +++ b/ann_class2/tf_with_save.py @@ -5,6 +5,7 @@ # Note: you may need to update your version of future # sudo pip install -U future +import os import json import numpy as np import tensorflow as tf @@ -78,7 +79,7 @@ def fit(self, X, Y, Xtest, Ytest): Ybatch = Y[j*batch_sz:(j*batch_sz + batch_sz),] session.run(train_op, feed_dict={self.inputs: Xbatch, self.targets: Ybatch}) - if j % 100 == 0: + if j % 200 == 0: test_cost = session.run(cost, feed_dict={self.inputs: Xtest, self.targets: Ytest}) Ptest = session.run(self.predict_op, feed_dict={self.inputs: Xtest}) err = error_rate(Ptest, Ytest) @@ -124,14 +125,9 @@ def load(filename): def main(): - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] - - model = TFLogistic("tf.model") + model = TFLogistic("./tf.model") model.fit(Xtrain, Ytrain, Xtest, Ytest) # test out restoring the model via the predict function diff --git a/ann_class2/theano2.py b/ann_class2/theano2.py index 8ebb4f2b..9712af21 100644 --- a/ann_class2/theano2.py +++ b/ann_class2/theano2.py @@ -28,20 +28,18 @@ def relu(a): def main(): # step 1: get the data and define 
all the usual variables - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() max_iter = 20 print_period = 10 - lr = 0.00004 + lr = 0.0004 reg = 0.01 - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] - Ytrain_ind = y2indicator(Ytrain) - Ytest_ind = y2indicator(Ytest) + Xtrain = Xtest.astype(np.float32) + Ytrain = Ytest.astype(np.float32) + Ytrain_ind = y2indicator(Ytrain).astype(np.float32) + Ytest_ind = y2indicator(Ytest).astype(np.float32) N, D = Xtrain.shape batch_sz = 500 @@ -72,10 +70,6 @@ def main(): # step 3: training expressions and functions # we can just include regularization as part of the cost because it is also automatically differentiated! - # update_W1 = W1 - lr*(T.grad(cost, W1) + reg*W1) - # update_b1 = b1 - lr*(T.grad(cost, b1) + reg*b1) - # update_W2 = W2 - lr*(T.grad(cost, W2) + reg*W2) - # update_b2 = b2 - lr*(T.grad(cost, b2) + reg*b2) update_W1 = W1 - lr*T.grad(cost, W1) update_b1 = b1 - lr*T.grad(cost, b1) update_W2 = W2 - lr*T.grad(cost, W2) diff --git a/ann_class2/theano_gpu.py b/ann_class2/theano_gpu.py deleted file mode 100644 index a6da9152..00000000 --- a/ann_class2/theano_gpu.py +++ /dev/null @@ -1,114 +0,0 @@ -# A 1-hidden-layer neural network in Theano. -# This code is not optimized for speed. -# It's just to get something working, using the principles we know. - -# For the class Data Science: Practical Deep Learning Concepts in Theano and TensorFlow -# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow -# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - -import numpy as np -import theano -import theano.tensor as T -from datetime import datetime - -from util import get_normalized_data, y2indicator - - -def error_rate(p, t): - return np.mean(p != t) - - -def relu(a): - return a * (a > 0) - - -def main(): - # step 1: get the data and define all the usual variables - X, Y = get_normalized_data() - - max_iter = 20 - print_period = 10 - - lr = 0.00004 - reg = 0.01 - - X = X.astype(np.float32) - Y = Y.astype(np.float32) - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] - Ytrain_ind = y2indicator(Ytrain).astype(np.float32) - Ytest_ind = y2indicator(Ytest).astype(np.float32) - - N, D = Xtrain.shape - batch_sz = 500 - n_batches = N // batch_sz - - M = 300 - K = 10 - W1_init = np.random.randn(D, M) / 28 - b1_init = np.zeros(M) - W2_init = np.random.randn(M, K) / np.sqrt(M) - b2_init = np.zeros(K) - - # step 2: define theano variables and expressions - thX = T.matrix('X') - thT = T.matrix('T') - W1 = theano.shared(W1_init.astype(np.float32), 'W1') - b1 = theano.shared(b1_init.astype(np.float32), 'b1') - W2 = theano.shared(W2_init.astype(np.float32), 'W2') - b2 = theano.shared(b2_init.astype(np.float32), 'b2') - - # we can use the built-in theano functions to do relu and softmax - thZ = relu( thX.dot(W1) + b1 ) # relu is new in version 0.7.1 but just in case you don't have it - thY = T.nnet.softmax( thZ.dot(W2) + b2 ) - - # define the cost function and prediction - cost = -(thT * T.log(thY)).sum() + reg*((W1*W1).sum() + (b1*b1).sum() + (W2*W2).sum() + (b2*b2).sum()) - prediction = T.argmax(thY, axis=1) - - # step 3: training expressions and functions - # we can just include regularization as part of the cost because it is also automatically differentiated! 
- # update_W1 = W1 - lr*(T.grad(cost, W1) + reg*W1) - # update_b1 = b1 - lr*(T.grad(cost, b1) + reg*b1) - # update_W2 = W2 - lr*(T.grad(cost, W2) + reg*W2) - # update_b2 = b2 - lr*(T.grad(cost, b2) + reg*b2) - update_W1 = W1 - lr*T.grad(cost, W1) - update_b1 = b1 - lr*T.grad(cost, b1) - update_W2 = W2 - lr*T.grad(cost, W2) - update_b2 = b2 - lr*T.grad(cost, b2) - - train = theano.function( - inputs=[thX, thT], - updates=[(W1, update_W1), (b1, update_b1), (W2, update_W2), (b2, update_b2)], - ) - - # create another function for this because we want it over the whole dataset - get_prediction = theano.function( - inputs=[thX, thT], - outputs=[cost, prediction], - ) - - t0 = datetime.now() - for i in range(max_iter): - for j in range(n_batches): - Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] - Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] - - train(Xbatch, Ybatch) - if j % print_period == 0: - cost_val, prediction_val = get_prediction(Xtest, Ytest_ind) - err = error_rate(prediction_val, Ytest) - print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, cost_val, err)) - - print("Training time:", datetime.now() - t0) - # how would you incorporate momentum into the gradient descent procedure? - - -if __name__ == '__main__': - main() diff --git a/ann_class2/util.py b/ann_class2/util.py index 2be2ab40..5c8ad934 100644 --- a/ann_class2/util.py +++ b/ann_class2/util.py @@ -78,19 +78,40 @@ def get_transformed_data(): exit() df = pd.read_csv('../large_files/train.csv') - data = df.as_matrix().astype(np.float32) + data = df.values.astype(np.float32) np.random.shuffle(data) X = data[:, 1:] - mu = X.mean(axis=0) - X = X - mu # center the data - pca = PCA() - Z = pca.fit_transform(X) Y = data[:, 0].astype(np.int32) + Xtrain = X[:-1000] + Ytrain = Y[:-1000] + Xtest = X[-1000:] + Ytest = Y[-1000:] + + # center the data + mu = Xtrain.mean(axis=0) + Xtrain = Xtrain - mu + Xtest = Xtest - mu + + # transform the data + pca = PCA() + Ztrain = pca.fit_transform(Xtrain) + Ztest = pca.transform(Xtest) + plot_cumulative_variance(pca) - return Z, Y, pca, mu + # take first 300 cols of Z + Ztrain = Ztrain[:, :300] + Ztest = Ztest[:, :300] + + # normalize Z + mu = Ztrain.mean(axis=0) + std = Ztrain.std(axis=0) + Ztrain = (Ztrain - mu) / std + Ztest = (Ztest - mu) / std + + return Ztrain, Ztest, Ytrain, Ytest def get_normalized_data(): @@ -104,15 +125,24 @@ def get_normalized_data(): exit() df = pd.read_csv('../large_files/train.csv') - data = df.as_matrix().astype(np.float32) + data = df.values.astype(np.float32) np.random.shuffle(data) X = data[:, 1:] - mu = X.mean(axis=0) - std = X.std(axis=0) - np.place(std, std == 0, 1) - X = (X - mu) / std # normalize the data Y = data[:, 0] - return X, Y + + Xtrain = X[:-1000] + Ytrain = Y[:-1000] + Xtest = X[-1000:] + Ytest = Y[-1000:] + + # normalize the data + mu = Xtrain.mean(axis=0) + std = Xtrain.std(axis=0) + np.place(std, std == 0, 1) + Xtrain = (Xtrain - mu) / std + Xtest = (Xtest - mu) / std + + return Xtrain, Xtest, Ytrain, Ytest def plot_cumulative_variance(pca): @@ -167,25 +197,11 @@ def y2indicator(y): def benchmark_full(): - X, Y = get_normalized_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() print("Performing logistic regression...") # lr = LogisticRegression(solver='lbfgs') - # # test on the last 1000 points - # lr.fit(X[:-1000, :200], Y[:-1000]) # use only first 200 dimensions - # print lr.score(X[-1000:, :200], Y[-1000:]) - # print "X:", X - - # normalize X first - # mu = X.mean(axis=0) - # std = X.std(axis=0) - # X = (X - 
mu) / std - - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] # convert Ytrain and Ytest to (N x K) matrices of indicator variables N, D = Xtrain.shape @@ -237,19 +253,8 @@ def benchmark_full(): def benchmark_pca(): - X, Y, _, _ = get_transformed_data() - X = X[:, :300] - - # normalize X first - mu = X.mean(axis=0) - std = X.std(axis=0) - X = (X - mu) / std - + Xtrain, Xtest, Ytrain, Ytest = get_transformed_data() print("Performing logistic regression...") - Xtrain = X[:-1000,] - Ytrain = Y[:-1000] - Xtest = X[-1000:,] - Ytest = Y[-1000:] N, D = Xtrain.shape Ytrain_ind = np.zeros((N, 10)) @@ -299,6 +304,6 @@ def benchmark_pca(): if __name__ == '__main__': - benchmark_pca() - # benchmark_full() + # benchmark_pca() + benchmark_full() From bf41736279f9edbfe128bb4f5667e6b5d1ecc3e5 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 16 Jul 2018 03:58:40 -0400 Subject: [PATCH 066/329] update --- ann_class/tf_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ann_class/tf_example.py b/ann_class/tf_example.py index 0e4e37c6..503aa9a7 100644 --- a/ann_class/tf_example.py +++ b/ann_class/tf_example.py @@ -58,7 +58,7 @@ def forward(X, W1, b1, W2, b2): logits = forward(tfX, W1, b1, W2, b2) cost = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( + tf.nn.softmax_cross_entropy_with_logits_v2( labels=tfY, logits=logits ) From f24ce12d26e59c84a0eb103c18fd7937ae320782 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 22 Jul 2018 22:29:54 -0400 Subject: [PATCH 067/329] ml basics --- numpy_class/classification_example.py | 99 +++++++++++++++++++++++++ numpy_class/regression_example.py | 102 ++++++++++++++++++++++++++ 2 files changed, 201 insertions(+) create mode 100644 numpy_class/classification_example.py create mode 100644 numpy_class/regression_example.py diff --git a/numpy_class/classification_example.py b/numpy_class/classification_example.py new file mode 100644 index 00000000..c791d928 --- /dev/null +++ b/numpy_class/classification_example.py @@ -0,0 +1,99 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +# just in case we need it +import numpy as np + + +# import the function that will get the data +# yes, sklearn comes with built-in datasets! 
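+# (load_breast_cancer is just one of several built-in loaders;
+#  load_iris and load_digits follow the same pattern if you want to
+#  try this example on a different dataset)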
+from sklearn.datasets import load_breast_cancer + +# load the data +data = load_breast_cancer() + +# check the type of 'data' +type(data) + +# note: it is a Bunch object +# this basically acts like a dictionary where you can treat the keys like attributes +data.keys() + +# 'data' (the attribute) means the input data +data.data.shape +# it has 569 samples, 30 features + +# 'targets' +data.target +# note how the targets are just 0s and 1s +# normally, when you have K targets, they are labeled 0..K-1 + +# their meaning is not lost +data.target_names + +# there are also 569 corresponding targets +data.target.shape + +# you can also determinw the meaning of each feature +data.feature_names + + +# normally we would put all of our imports at the top +# but this lets us tell a story +from sklearn.model_selection import train_test_split + + +# split the data into train and test sets +# this lets us simulate how our model will perform in the future +X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.33) + + +# instantiate a classifer and train it +from sklearn.ensemble import RandomForestClassifier + + +model = RandomForestClassifier() +model.fit(X_train, y_train) + + +# evaluate the model's performance +model.score(X_train, y_train) +model.score(X_test, y_test) + + +# how you can make predictions +predictions = model.predict(X_test) + +# what did we get? +predictions + +# manually check the accuracy of your predictions +N = len(y_test) +np.sum(predictions == y_test) / N # can also just call np.mean() + + + +# we can even use deep learning to solve the same problem! +from sklearn.neural_network import MLPClassifier + +# you'll learn why scaling is needed in a later course +from sklearn.preprocessing import StandardScaler + +scaler = StandardScaler() +X_train2 = scaler.fit_transform(X_train) +X_test2 = scaler.transform(X_test) + +model = MLPClassifier(max_iter=500) +model.fit(X_train2, y_train) + + +# evaluate the model's performance +model.score(X_train2, y_train) +model.score(X_test2, y_test) \ No newline at end of file diff --git a/numpy_class/regression_example.py b/numpy_class/regression_example.py new file mode 100644 index 00000000..ea5861dd --- /dev/null +++ b/numpy_class/regression_example.py @@ -0,0 +1,102 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +# Get the data from: +# https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +# just in case we need it +import numpy as np +import pandas as pd + + +# load the data +# important note: this is where we will usually put data files +df = pd.read_csv('../large_files/airfoil_self_noise.dat', sep='\t', header=None) + +# check the data +df.head() +df.info() + +# get the inputs +data = df[[0,1,2,3,4]].values + +# get the outputs +target = df[5].values + +# tiny update: pandas is moving from .as_matrix() to the equivalent .values + + +# normally we would put all of our imports at the top +# but this lets us tell a story +from sklearn.model_selection import train_test_split + + +# split the data into train and test sets +# this lets us simulate how our model will perform in the future +X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.33) + + +# 
instantiate a classifer and train it +from sklearn.linear_model import LinearRegression + + +model = LinearRegression() +model.fit(X_train, y_train) + + +# evaluate the model's performance +print(model.score(X_train, y_train)) +print(model.score(X_test, y_test)) + + +# how you can make predictions +predictions = model.predict(X_test) + +# what did we get? +predictions + + + +# we can even use random forest to solve the same problem! +from sklearn.ensemble import RandomForestRegressor + +model2 = RandomForestRegressor() +model2.fit(X_train, y_train) + + +# evaluate the model's performance +print(model2.score(X_train, y_train)) +print(model2.score(X_test, y_test)) + + + + +# we can even use deep learning to solve the same problem! +from sklearn.neural_network import MLPRegressor + +# you'll learn why scaling is needed in a later course +from sklearn.preprocessing import StandardScaler + +scaler = StandardScaler() +X_train2 = scaler.fit_transform(X_train) +X_test2 = scaler.transform(X_test) +scaler2 = StandardScaler() +y_train2 = scaler2.fit_transform(np.expand_dims(y_train, -1)).ravel() +y_test2 = scaler2.fit_transform(np.expand_dims(y_test, -1)).ravel() + +model = MLPRegressor(max_iter=500) +model.fit(X_train2, y_train2) + + +# evaluate the model's performance +print(model.score(X_train2, y_train2)) +print(model.score(X_test2, y_test2)) +# not as good as a random forest! +# but not as bad as linear regression From 64c1437dc4eb8684cf91120ac1a3bdb87e995c11 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 22 Jul 2018 22:59:21 -0400 Subject: [PATCH 068/329] update youtube url --- numpy_class/classification_example.py | 1 + numpy_class/regression_example.py | 1 + 2 files changed, 2 insertions(+) diff --git a/numpy_class/classification_example.py b/numpy_class/classification_example.py index c791d928..bc65aeda 100644 --- a/numpy_class/classification_example.py +++ b/numpy_class/classification_example.py @@ -1,5 +1,6 @@ # https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python # https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python +# YouTube direct link: http://bit.ly/2LENC50 from __future__ import print_function, division from future.utils import iteritems diff --git a/numpy_class/regression_example.py b/numpy_class/regression_example.py index ea5861dd..aea9cc02 100644 --- a/numpy_class/regression_example.py +++ b/numpy_class/regression_example.py @@ -1,5 +1,6 @@ # https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python # https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python +# YouTube direct link: http://bit.ly/2LENC50 # Get the data from: # https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise From 99bbada812a85ca5b690924293ef8c006c4c6ba5 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 24 Jul 2018 15:26:52 -0400 Subject: [PATCH 069/329] update --- unsupervised_class3/bayes_classifier_gaussian.py | 10 ++++++++-- unsupervised_class3/bayes_classifier_gmm.py | 10 ++++++++-- unsupervised_class3/util.py | 6 ++++-- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/unsupervised_class3/bayes_classifier_gaussian.py b/unsupervised_class3/bayes_classifier_gaussian.py index 65506d06..d9098ef3 100644 --- a/unsupervised_class3/bayes_classifier_gaussian.py +++ b/unsupervised_class3/bayes_classifier_gaussian.py @@ -11,6 +11,12 @@ from scipy.stats import multivariate_normal as mvn +def clamp_sample(x): + x = np.minimum(x, 1) + x = np.maximum(x, 0) + return x + + class BayesClassifier: def 
fit(self, X, Y): # assume classes are numbered 0...K-1 @@ -30,11 +36,11 @@ def fit(self, X, Y): def sample_given_y(self, y): g = self.gaussians[y] - return mvn.rvs(mean=g['m'], cov=g['c']) + return clamp_sample( mvn.rvs(mean=g['m'], cov=g['c']) ) def sample(self): y = np.random.choice(self.K, p=self.p_y) - return self.sample_given_y(y) + return clamp_sample( self.sample_given_y(y) ) if __name__ == '__main__': diff --git a/unsupervised_class3/bayes_classifier_gmm.py b/unsupervised_class3/bayes_classifier_gmm.py index 0b79b831..b129c522 100644 --- a/unsupervised_class3/bayes_classifier_gmm.py +++ b/unsupervised_class3/bayes_classifier_gmm.py @@ -11,6 +11,12 @@ from sklearn.mixture import BayesianGaussianMixture +def clamp_sample(x): + x = np.minimum(x, 1) + x = np.maximum(x, 0) + return x + + class BayesClassifier: def fit(self, X, Y): # assume classes are numbered 0...K-1 @@ -39,11 +45,11 @@ def sample_given_y(self, y): # we cheat by looking at "non-public" params in # the sklearn source code mean = gmm.means_[sample[1]] - return sample[0].reshape(28, 28), mean.reshape(28, 28) + return clamp_sample( sample[0].reshape(28, 28) ), mean.reshape(28, 28) def sample(self): y = np.random.choice(self.K, p=self.p_y) - return self.sample_given_y(y) + return clamp_sample( self.sample_given_y(y) ) if __name__ == '__main__': diff --git a/unsupervised_class3/util.py b/unsupervised_class3/util.py index 64e5e26d..c8194c80 100644 --- a/unsupervised_class3/util.py +++ b/unsupervised_class3/util.py @@ -14,6 +14,7 @@ from scipy.misc import imread, imsave, imresize from glob import glob from tqdm import tqdm +from sklearn.utils import shuffle def get_mnist(limit=None): @@ -26,10 +27,11 @@ def get_mnist(limit=None): print("Reading in and transforming data...") df = pd.read_csv('../large_files/train.csv') - data = df.as_matrix() - np.random.shuffle(data) + data = df.values + # np.random.shuffle(data) X = data[:, 1:] / 255.0 # data is from 0..255 Y = data[:, 0] + X, Y = shuffle(X, Y) if limit is not None: X, Y = X[:limit], Y[:limit] return X, Y From 98a798afc423f5301608ed7074dc4f7f36d271d3 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Tue, 24 Jul 2018 23:18:01 -0400 Subject: [PATCH 070/329] add theano --- nlp_class2/word2vec_theano.py | 409 ++++++++++++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100644 nlp_class2/word2vec_theano.py diff --git a/nlp_class2/word2vec_theano.py b/nlp_class2/word2vec_theano.py new file mode 100644 index 00000000..50e3efd5 --- /dev/null +++ b/nlp_class2/word2vec_theano.py @@ -0,0 +1,409 @@ +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import json +import numpy as np +import matplotlib.pyplot as plt +from scipy.special import expit as sigmoid +from sklearn.utils import shuffle +from datetime import datetime +# from util import find_analogies + +from scipy.spatial.distance import cosine as cos_dist +from sklearn.metrics.pairwise import pairwise_distances + + +from glob import glob + +import os +import sys +import string + +import theano +import theano.tensor as T + + + +# unfortunately these work different ways +def remove_punctuation_2(s): + return s.translate(None, string.punctuation) + +def remove_punctuation_3(s): + return 
s.translate(str.maketrans('','',string.punctuation)) + +if sys.version.startswith('2'): + remove_punctuation = remove_punctuation_2 +else: + remove_punctuation = remove_punctuation_3 + + + +def get_wiki(): + V = 20000 + files = glob('../large_files/enwiki*.txt') + all_word_counts = {} + for f in files: + for line in open(f): + if line and line[0] not in '[*-|=\{\}': + s = remove_punctuation(line).lower().split() + if len(s) > 1: + for word in s: + if word not in all_word_counts: + all_word_counts[word] = 0 + all_word_counts[word] += 1 + print("finished counting") + + V = min(V, len(all_word_counts)) + all_word_counts = sorted(all_word_counts.items(), key=lambda x: x[1], reverse=True) + + top_words = [w for w, count in all_word_counts[:V-1]] + [''] + word2idx = {w:i for i, w in enumerate(top_words)} + unk = word2idx[''] + + sents = [] + for f in files: + for line in open(f): + if line and line[0] not in '[*-|=\{\}': + s = remove_punctuation(line).lower().split() + if len(s) > 1: + # if a word is not nearby another word, there won't be any context! + # and hence nothing to train! + sent = [word2idx[w] if w in word2idx else unk for w in s] + sents.append(sent) + return sents, word2idx + + + + +def train_model(savedir): + # get the data + sentences, word2idx = get_wiki() #get_text8() + + + # number of unique words + vocab_size = len(word2idx) + + + # config + window_size = 5 + learning_rate = 0.025*128 + final_learning_rate = 0.0001*128 + num_negatives = 5 # number of negative samples to draw per input word + samples_per_epoch = int(1e5) + epochs = 1 + D = 50 # word embedding size + + + # learning rate decay + learning_rate_delta = (learning_rate - final_learning_rate) / epochs + # learning_rate_delta = 0 + + + # params + W = np.random.randn(vocab_size, D) / np.sqrt(D + vocab_size) # input-to-hidden + V = np.random.randn(D, vocab_size) / np.sqrt(D + vocab_size) # hidden-to-output + + + # theano variables + thW = theano.shared(W) + thV = theano.shared(V) + + # theano placeholders + th_pos_word = T.ivector('pos_word') + th_neg_word = T.ivector('neg_word') + th_context = T.ivector('context') + th_lr = T.scalar('learning_rate') + + # get the output and loss + input_words = T.concatenate([th_pos_word, th_neg_word]) + W_subset = thW[input_words] + dbl_context = T.concatenate([th_context, th_context]) + V_subset = thV[:, dbl_context] + logits = W_subset.dot(V_subset) + out = T.nnet.sigmoid(logits) + + n = th_pos_word.shape[0] + th_cost = -T.log(out[:n]).mean() - T.log(1 - out[n:]).mean() + + + # specify the updates + gW = T.grad(th_cost, W_subset) + gV = T.grad(th_cost, V_subset) + W_update = T.inc_subtensor(W_subset, -th_lr*gW) + V_update = T.inc_subtensor(V_subset, -th_lr*gV) + updates = [(thW, W_update), (thV, V_update)] + + # full update + # gW, gV = T.grad(th_cost, [thW, thV]) + # vW = theano.shared(np.zeros_like(W)) + # vV = theano.shared(np.zeros_like(V)) + # new_vW = 0.9*vW - th_lr*gW + # new_vV = 0.9*vV - th_lr*gV + # W_update = thW + new_vW + # V_update = thV + new_vV + # updates = [(thW, W_update), (thV, V_update), (vW, new_vW), (vV, new_vV)] + + # make callable functions + cost_op = theano.function( + inputs=[th_pos_word, th_neg_word, th_context], + outputs=th_cost, + # allow_input_downcast=True + ) + cost_train_op = theano.function( + inputs=[th_pos_word, th_neg_word, th_context, th_lr], + outputs=th_cost, + updates=updates, + # allow_input_downcast=True + ) + + + # distribution for drawing negative samples + p_neg = get_negative_sampling_distribution(sentences, vocab_size) + + + # save 
the costs to plot them per iteration + costs = [] + + + # number of total words in corpus + total_words = sum(len(sentence) for sentence in sentences) + print("total number of words in corpus:", total_words) + + + # keep only certain words based on p_neg + threshold = 1e-5 + p_drop = 1 - np.sqrt(threshold / p_neg) + + + # train the model + for epoch in range(epochs): + # randomly order sentences so we don't always see + # sentences in the same order + np.random.shuffle(sentences) + + # accumulate the cost + cost = 0 + counter = 0 + inputs = [] + targets = [] + negwords = [] + t0 = datetime.now() + for sentence in sentences: + # keep only certain words based on p_neg + sentence = [w for w in sentence \ + if np.random.random() < (1 - p_drop[w]) + ] + if len(sentence) < 2: + continue + + # randomly order words so we don't always see + # samples in the same order + randomly_ordered_positions = np.random.choice( + len(sentence), + size=len(sentence), + replace=False, + ) + + + for pos in randomly_ordered_positions: + # the middle word + word = sentence[pos] + + # get the positive context words/negative samples + context_words = get_context(pos, sentence, window_size) + neg_word = np.random.choice(vocab_size, p=p_neg) + + n = len(context_words) + inputs += [word]*n + negwords += [neg_word]*n + targets += context_words + + + if len(inputs) >= 128: + c = cost_train_op(inputs, negwords, targets, learning_rate) + cost += c + + if np.isnan(c): + print("c is nan:", c) + exit() + + # reset + inputs = [] + targets = [] + negwords = [] + + counter += 1 + if counter % 100 == 0: + sys.stdout.write("processed %s / %s, cost: %s\r" % (counter, len(sentences), c)) + sys.stdout.flush() + + + # print stuff so we don't stare at a blank screen + dt = datetime.now() - t0 + print("epoch complete:", epoch, "cost:", cost, "dt:", dt) + + # save the cost + costs.append(cost) + + # update the learning rate + learning_rate -= learning_rate_delta + + + # plot the cost per iteration + plt.plot(costs) + plt.show() + + + # save the model + if not os.path.exists(savedir): + os.mkdir(savedir) + + with open('%s/word2idx.json' % savedir, 'w') as f: + json.dump(word2idx, f) + + # don't forget to extract the weights from theano + W, V = thW.get_value(), thV.get_value() + np.savez('%s/weights.npz' % savedir, W, V) + + # return the model + return word2idx, W, V + + +def get_negative_sampling_distribution(sentences, vocab_size): + # Pn(w) = prob of word occuring + # we would like to sample the negative samples + # such that words that occur more often + # should be sampled more often + + word_freq = np.zeros(vocab_size) + word_count = sum(len(sentence) for sentence in sentences) + for sentence in sentences: + for word in sentence: + word_freq[word] += 1 + + # smooth it + p_neg = word_freq**0.75 + + # normalize it + p_neg = p_neg / p_neg.sum() + + assert(np.all(p_neg > 0)) + return p_neg + + +def get_context(pos, sentence, window_size): + # input: + # a sentence of the form: x x x x c c c pos c c c x x x x + # output: + # the context word indices: c c c c c c + + start = max(0, pos - window_size) + end_ = min(len(sentence), pos + window_size) + + context = [] + for ctx_pos, ctx_word_idx in enumerate(sentence[start:end_], start=start): + if ctx_pos != pos: + # don't include the input word itself as a target + context.append(ctx_word_idx) + return context + # return np.concatenate([sentence[start:pos], sentence[pos+1:end_]]) + + +def load_model(savedir): + with open('%s/word2idx.json' % savedir) as f: + word2idx = json.load(f) + npz = 
np.load('%s/weights.npz' % savedir) + W = npz['arr_0'] + V = npz['arr_1'] + return word2idx, W, V + + + +def analogy(pos1, neg1, pos2, neg2, word2idx, idx2word, W): + V, D = W.shape + + # don't actually use pos2 in calculation, just print what's expected + print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2)) + for w in (pos1, neg1, pos2, neg2): + if w not in word2idx: + print("Sorry, %s not in word2idx" % w) + return + + p1 = W[word2idx[pos1]] + n1 = W[word2idx[neg1]] + p2 = W[word2idx[pos2]] + n2 = W[word2idx[neg2]] + + vec = p1 - n1 + n2 + + distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V) + idx = distances.argsort()[:10] + + # pick one that's not p1, n1, or n2 + best_idx = -1 + keep_out = [word2idx[w] for w in (pos1, neg1, neg2)] + # print("keep_out:", keep_out) + for i in idx: + if i not in keep_out: + best_idx = i + break + # print("best_idx:", best_idx) + + print("got: %s - %s = %s - %s" % (pos1, neg1, idx2word[best_idx], neg2)) + print("closest 10:") + for i in idx: + print(idx2word[i], distances[i]) + + print("dist to %s:" % pos2, cos_dist(p2, vec)) + + +def test_model(word2idx, W, V): + # there are multiple ways to get the "final" word embedding + # We = (W + V.T) / 2 + # We = W + + idx2word = {i:w for w, i in word2idx.items()} + + for We in (W, (W + V.T) / 2): + print("**********") + + analogy('king', 'man', 'queen', 'woman', word2idx, idx2word, We) + analogy('king', 'prince', 'queen', 'princess', word2idx, idx2word, We) + analogy('miami', 'florida', 'dallas', 'texas', word2idx, idx2word, We) + analogy('einstein', 'scientist', 'picasso', 'painter', word2idx, idx2word, We) + analogy('japan', 'sushi', 'germany', 'bratwurst', word2idx, idx2word, We) + analogy('man', 'woman', 'he', 'she', word2idx, idx2word, We) + analogy('man', 'woman', 'uncle', 'aunt', word2idx, idx2word, We) + analogy('man', 'woman', 'brother', 'sister', word2idx, idx2word, We) + analogy('man', 'woman', 'husband', 'wife', word2idx, idx2word, We) + analogy('man', 'woman', 'actor', 'actress', word2idx, idx2word, We) + analogy('man', 'woman', 'father', 'mother', word2idx, idx2word, We) + analogy('heir', 'heiress', 'prince', 'princess', word2idx, idx2word, We) + analogy('nephew', 'niece', 'uncle', 'aunt', word2idx, idx2word, We) + analogy('france', 'paris', 'japan', 'tokyo', word2idx, idx2word, We) + analogy('france', 'paris', 'china', 'beijing', word2idx, idx2word, We) + analogy('february', 'january', 'december', 'november', word2idx, idx2word, We) + analogy('france', 'paris', 'germany', 'berlin', word2idx, idx2word, We) + analogy('week', 'day', 'year', 'month', word2idx, idx2word, We) + analogy('week', 'day', 'hour', 'minute', word2idx, idx2word, We) + analogy('france', 'paris', 'italy', 'rome', word2idx, idx2word, We) + analogy('paris', 'france', 'rome', 'italy', word2idx, idx2word, We) + analogy('france', 'french', 'england', 'english', word2idx, idx2word, We) + analogy('japan', 'japanese', 'china', 'chinese', word2idx, idx2word, We) + analogy('china', 'chinese', 'america', 'american', word2idx, idx2word, We) + analogy('japan', 'japanese', 'italy', 'italian', word2idx, idx2word, We) + analogy('japan', 'japanese', 'australia', 'australian', word2idx, idx2word, We) + analogy('walk', 'walking', 'swim', 'swimming', word2idx, idx2word, We) + + + +if __name__ == '__main__': + word2idx, W, V = train_model('w2v_model') + # word2idx, W, V = load_model('w2v_model') + test_model(word2idx, W, V) + From a4193c98543edb444fe586b37600fe173e907814 Mon Sep 17 00:00:00 2001 From: Bob Date: 
Wed, 25 Jul 2018 02:57:37 -0400 Subject: [PATCH 071/329] update --- cnn_class2/class_activation_maps.py | 97 +++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 cnn_class2/class_activation_maps.py diff --git a/cnn_class2/class_activation_maps.py b/cnn_class2/class_activation_maps.py new file mode 100644 index 00000000..951a4fc5 --- /dev/null +++ b/cnn_class2/class_activation_maps.py @@ -0,0 +1,97 @@ +# https://deeplearningcourses.com/c/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.layers import Input, Lambda, Dense, Flatten +from keras.models import Model +from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions +# from keras.applications.inception_v3 import InceptionV3, preprocess_input +from keras.preprocessing import image +from keras.preprocessing.image import ImageDataGenerator + +from sklearn.metrics import confusion_matrix +import numpy as np +import scipy as sp +import matplotlib.pyplot as plt + +from glob import glob + +from skimage.transform import rescale, resize + + + +# useful for getting number of files +image_files = glob('../large_files/256_ObjectCategories/*/*.jp*g') +image_files += glob('../large_files/101_ObjectCategories/*/*.jp*g') + + + +# look at an image for fun +plt.imshow(image.load_img(np.random.choice(image_files))) +plt.show() + + +# add preprocessing layer to the front of VGG +resnet = ResNet50(input_shape=(224, 224, 3), weights='imagenet', include_top=True) + +# view the structure of the model +# if you want to confirm we need activation_49 +resnet.summary() + +# make a model to get output before flatten +activation_layer = resnet.get_layer('activation_49') + +# create a model object +model = Model(inputs=resnet.input, outputs=activation_layer.output) + +# get the feature map weights +final_dense = resnet.get_layer('fc1000') +W = final_dense.get_weights()[0] + + +while True: + img = image.load_img(np.random.choice(image_files), target_size=(224, 224)) + x = preprocess_input(np.expand_dims(img, 0)) + fmaps = model.predict(x)[0] # 7 x 7 x 2048 + + # get predicted class + probs = resnet.predict(x) + classnames = decode_predictions(probs)[0] + print(classnames) + classname = classnames[0][1] + pred = np.argmax(probs[0]) + + # get the 2048 weights for the relevant class + w = W[:, pred] + + # "dot" w with fmaps + cam = fmaps.dot(w) + + # upsample to 224 x 224 + cam = sp.ndimage.zoom(cam, (32, 32), order=1) + + plt.subplot(1,2,1) + plt.imshow(img, alpha=0.8) + plt.imshow(cam, cmap='jet', alpha=0.5) + plt.subplot(1,2,2) + plt.imshow(img) + plt.title(classname) + plt.show() + + ans = input("Continue? 
(Y/n)") + if ans and ans[0].lower() == 'n': + break + + + +# def slowversion(A, w): +# N = len(w) +# result = np.zeros(A.shape[:-1]) +# for i in range(N): +# result += A[:,:,i]*w[i] +# return result + From fabc3989809fea1ade7026e6d8d5107884f4db21 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 25 Jul 2018 20:45:25 -0400 Subject: [PATCH 072/329] fix comment --- unsupervised_class3/parameterize_guassian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsupervised_class3/parameterize_guassian.py b/unsupervised_class3/parameterize_guassian.py index 7f5f2d5a..497a7b88 100644 --- a/unsupervised_class3/parameterize_guassian.py +++ b/unsupervised_class3/parameterize_guassian.py @@ -17,7 +17,7 @@ def softplus(x): # we're going to make a neural network # with the layer sizes (4, 3, 2) -# like a toy version of a decoder +# like a toy version of a encoder W1 = np.random.randn(4, 3) W2 = np.random.randn(3, 2*2) From de1a9a4fe089308711812f913ed3a58e8e3607b6 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 1 Aug 2018 13:35:33 -0400 Subject: [PATCH 073/329] more pytorch --- ann_class2/pytorch_batchnorm.py | 195 +++++++++++++++++++++++++++++++ ann_class2/pytorch_dropout.py | 196 ++++++++++++++++++++++++++++++++ ann_class2/pytorch_example.py | 2 +- ann_class2/pytorch_example2.py | 6 +- 4 files changed, 396 insertions(+), 3 deletions(-) create mode 100644 ann_class2/pytorch_batchnorm.py create mode 100644 ann_class2/pytorch_dropout.py diff --git a/ann_class2/pytorch_batchnorm.py b/ann_class2/pytorch_batchnorm.py new file mode 100644 index 00000000..748c179a --- /dev/null +++ b/ann_class2/pytorch_batchnorm.py @@ -0,0 +1,195 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +# Linux and Mac instructions: +# http://pytorch.org/#pip-install-pytorch + +# Windows instructions (just one line): +# conda install -c peterjc123 pytorch + +# Note: is helpful to look at keras_example.py first + + +import numpy as np +import matplotlib.pyplot as plt +from util import get_normalized_data + +import torch +from torch.autograd import Variable +from torch import optim + + + +# get the data, same as Theano + Tensorflow examples +# no need to split now, the fit() function will do it +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() + +# get shapes +_, D = Xtrain.shape +K = len(set(Ytrain)) + +# Note: no need to convert Y to indicator matrix + + +# the model will be a sequence of layers +model = torch.nn.Sequential() + + +# ANN with layers [784] -> [500] -> [300] -> [10] +model.add_module("dense1", torch.nn.Linear(D, 500)) +model.add_module("bn1", torch.nn.BatchNorm1d(500)) +model.add_module("relu1", torch.nn.ReLU()) +model.add_module("dense2", torch.nn.Linear(500, 300)) +model.add_module("bn2", torch.nn.BatchNorm1d(300)) +model.add_module("relu2", torch.nn.ReLU()) +model.add_module("dense3", torch.nn.Linear(300, K)) +# Note: no final softmax! +# just like Tensorflow, it's included in cross-entropy function + + +# define a loss function +# other loss functions can be found here: +# http://pytorch.org/docs/master/nn.html#loss-functions +loss = torch.nn.CrossEntropyLoss(size_average=True) +# Note: this returns a function! +# e.g. 
use it like: loss(logits, labels) + + +# define an optimizer +# other optimizers can be found here: +# http://pytorch.org/docs/master/optim.html +optimizer = optim.Adam(model.parameters(), lr=1e-4) + + + +# define the training procedure +# i.e. one step of gradient descent +# there are lots of steps +# so we encapsulate it in a function +# Note: inputs and labels are torch tensors +def train(model, loss, optimizer, inputs, labels): + # set the model to training mode + # because batch norm has 2 different modes! + model.train() + + inputs = Variable(inputs, requires_grad=False) + labels = Variable(labels, requires_grad=False) + + # Reset gradient + optimizer.zero_grad() + + # Forward + logits = model.forward(inputs) + output = loss.forward(logits, labels) + + # Backward + output.backward() + + # Update parameters + optimizer.step() + + # what's the difference between backward() and step()? + + return output.item() + + +# similar to train() but not doing the backprop step +def get_cost(model, loss, inputs, labels): + # set the model to testing mode + # because batch norm has 2 different modes! + model.eval() + + inputs = Variable(inputs, requires_grad=False) + labels = Variable(labels, requires_grad=False) + + # Forward + logits = model.forward(inputs) + output = loss.forward(logits, labels) + + return output.item() + + +# define the prediction procedure +# also encapsulate these steps +# Note: inputs is a torch tensor +def predict(model, inputs): + # set the model to testing mode + # because batch norm has 2 different modes! + model.eval() + + inputs = Variable(inputs, requires_grad=False) + logits = model.forward(inputs) + return logits.data.numpy().argmax(axis=1) + + +# return the accuracy +# labels is a torch tensor +# to get back the internal numpy data +# use the instance method .numpy() +def score(model, inputs, labels): + predictions = predict(model, inputs) + return np.mean(labels.numpy() == predictions) + + +### prepare for training loop ### + +# convert the data arrays into torch tensors +Xtrain = torch.from_numpy(Xtrain).float() +Ytrain = torch.from_numpy(Ytrain).long() +Xtest = torch.from_numpy(Xtest).float() +Ytest = torch.from_numpy(Ytest).long() + +# training parameters +epochs = 15 +batch_size = 32 +n_batches = Xtrain.size()[0] // batch_size + +# things to keep track of +train_costs = [] +test_costs = [] +train_accuracies = [] +test_accuracies = [] + +# main training loop +for i in range(epochs): + cost = 0 + test_cost = 0 + for j in range(n_batches): + Xbatch = Xtrain[j*batch_size:(j+1)*batch_size] + Ybatch = Ytrain[j*batch_size:(j+1)*batch_size] + cost += train(model, loss, optimizer, Xbatch, Ybatch) + + + # we could have also calculated the train cost here + # but I wanted to show you that we could also return it + # from the train function itself + train_acc = score(model, Xtrain, Ytrain) + test_acc = score(model, Xtest, Ytest) + test_cost = get_cost(model, loss, Xtest, Ytest) + + print("Epoch: %d, cost: %f, acc: %.2f" % (i, test_cost, test_acc)) + + # for plotting + train_costs.append(cost / n_batches) + train_accuracies.append(train_acc) + test_costs.append(test_cost) + test_accuracies.append(test_acc) + + + +# plot the results +plt.plot(train_costs, label='Train cost') +plt.plot(test_costs, label='Test cost') +plt.title('Cost') +plt.legend() +plt.show() + +plt.plot(train_accuracies, label='Train accuracy') +plt.plot(test_accuracies, label='Test accuracy') +plt.title('Accuracy') +plt.legend() +plt.show() diff --git a/ann_class2/pytorch_dropout.py 
b/ann_class2/pytorch_dropout.py new file mode 100644 index 00000000..970e3130 --- /dev/null +++ b/ann_class2/pytorch_dropout.py @@ -0,0 +1,196 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +# Linux and Mac instructions: +# http://pytorch.org/#pip-install-pytorch + +# Windows instructions (just one line): +# conda install -c peterjc123 pytorch + +# Note: is helpful to look at keras_example.py first + + +import numpy as np +import matplotlib.pyplot as plt +from util import get_normalized_data + +import torch +from torch.autograd import Variable +from torch import optim + + + +# get the data, same as Theano + Tensorflow examples +# no need to split now, the fit() function will do it +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() + +# get shapes +_, D = Xtrain.shape +K = len(set(Ytrain)) + +# Note: no need to convert Y to indicator matrix + + +# the model will be a sequence of layers +model = torch.nn.Sequential() + + +# ANN with layers [784] -> [500] -> [300] -> [10] +# NOTE: the "p" is p_drop, not p_keep +model.add_module("dropout1", torch.nn.Dropout(p=0.2)) +model.add_module("dense1", torch.nn.Linear(D, 500)) +model.add_module("relu1", torch.nn.ReLU()) +model.add_module("dropout2", torch.nn.Dropout(p=0.5)) +model.add_module("dense2", torch.nn.Linear(500, 300)) +model.add_module("relu2", torch.nn.ReLU()) +model.add_module("dropout3", torch.nn.Dropout(p=0.5)) +model.add_module("dense3", torch.nn.Linear(300, K)) +# Note: no final softmax! +# just like Tensorflow, it's included in cross-entropy function + + +# define a loss function +# other loss functions can be found here: +# http://pytorch.org/docs/master/nn.html#loss-functions +loss = torch.nn.CrossEntropyLoss(size_average=True) +# Note: this returns a function! +# e.g. use it like: loss(logits, labels) + + +# define an optimizer +# other optimizers can be found here: +# http://pytorch.org/docs/master/optim.html +optimizer = optim.Adam(model.parameters(), lr=1e-4) + + +# define the training procedure +# i.e. one step of gradient descent +# there are lots of steps +# so we encapsulate it in a function +# Note: inputs and labels are torch tensors +def train(model, loss, optimizer, inputs, labels): + # set the model to training mode + # because dropout has 2 different modes! + model.train() + + inputs = Variable(inputs, requires_grad=False) + labels = Variable(labels, requires_grad=False) + + # Reset gradient + optimizer.zero_grad() + + # Forward + logits = model.forward(inputs) + output = loss.forward(logits, labels) + + # Backward + output.backward() + + # Update parameters + optimizer.step() + + # what's the difference between backward() and step()? + + return output.item() + + +# similar to train() but not doing the backprop step +def get_cost(model, loss, inputs, labels): + # set the model to testing mode + # because dropout has 2 different modes! 
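+    # in eval() mode, Dropout layers are a no-op: PyTorch uses inverted dropout,
+    # scaling activations by 1/(1-p) at training time, so nothing needs rescaling at test time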
+ model.eval() + + inputs = Variable(inputs, requires_grad=False) + labels = Variable(labels, requires_grad=False) + + # Forward + logits = model.forward(inputs) + output = loss.forward(logits, labels) + + return output.item() + + +# define the prediction procedure +# also encapsulate these steps +# Note: inputs is a torch tensor +def predict(model, inputs): + # set the model to testing mode + # because dropout has 2 different modes! + model.eval() + + inputs = Variable(inputs, requires_grad=False) + logits = model.forward(inputs) + return logits.data.numpy().argmax(axis=1) + + +# return the accuracy +# labels is a torch tensor +# to get back the internal numpy data +# use the instance method .numpy() +def score(model, inputs, labels): + predictions = predict(model, inputs) + return np.mean(labels.numpy() == predictions) + + +### prepare for training loop ### + +# convert the data arrays into torch tensors +Xtrain = torch.from_numpy(Xtrain).float() +Ytrain = torch.from_numpy(Ytrain).long() +Xtest = torch.from_numpy(Xtest).float() +Ytest = torch.from_numpy(Ytest).long() + +# training parameters +epochs = 15 +batch_size = 32 +n_batches = Xtrain.size()[0] // batch_size + +# things to keep track of +train_costs = [] +test_costs = [] +train_accuracies = [] +test_accuracies = [] + +# main training loop +for i in range(epochs): + cost = 0 + test_cost = 0 + for j in range(n_batches): + Xbatch = Xtrain[j*batch_size:(j+1)*batch_size] + Ybatch = Ytrain[j*batch_size:(j+1)*batch_size] + cost += train(model, loss, optimizer, Xbatch, Ybatch) + + + # we could have also calculated the train cost here + # but I wanted to show you that we could also return it + # from the train function itself + train_acc = score(model, Xtrain, Ytrain) + test_acc = score(model, Xtest, Ytest) + test_cost = get_cost(model, loss, Xtest, Ytest) + + print("Epoch: %d, cost: %f, acc: %.2f" % (i, test_cost, test_acc)) + + # for plotting + train_costs.append(cost / n_batches) + train_accuracies.append(train_acc) + test_costs.append(test_cost) + test_accuracies.append(test_acc) + + + +# plot the results +plt.plot(train_costs, label='Train cost') +plt.plot(test_costs, label='Test cost') +plt.title('Cost') +plt.legend() +plt.show() + +plt.plot(train_accuracies, label='Train accuracy') +plt.plot(test_accuracies, label='Test accuracy') +plt.title('Accuracy') +plt.legend() +plt.show() diff --git a/ann_class2/pytorch_example.py b/ann_class2/pytorch_example.py index bd6af2a8..f7a34f89 100644 --- a/ann_class2/pytorch_example.py +++ b/ann_class2/pytorch_example.py @@ -87,7 +87,7 @@ def train(model, loss, optimizer, inputs, labels): optimizer.step() # what's the difference between backward() and step()? - return output.data[0] + return output.item() # define the prediction procedure diff --git a/ann_class2/pytorch_example2.py b/ann_class2/pytorch_example2.py index 9822cd3e..5620cba6 100644 --- a/ann_class2/pytorch_example2.py +++ b/ann_class2/pytorch_example2.py @@ -88,7 +88,7 @@ def train(model, loss, optimizer, inputs, labels): # what's the difference between backward() and step()? 
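+    # backward() computes the gradients of the loss with respect to each parameter;
+    # step() then uses those gradients to actually update the parameters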
- return output.data[0] + return output.item() # similar to train() but not doing the backprop step @@ -100,7 +100,7 @@ def get_cost(model, loss, inputs, labels): logits = model.forward(inputs) output = loss.forward(logits, labels) - return output.data[0] + return output.item() # define the prediction procedure @@ -171,9 +171,11 @@ def score(model, inputs, labels): plt.plot(train_costs, label='Train cost') plt.plot(test_costs, label='Test cost') plt.title('Cost') +plt.legend() plt.show() plt.plot(train_accuracies, label='Train accuracy') plt.plot(test_accuracies, label='Test accuracy') plt.title('Accuracy') +plt.legend() plt.show() From b61bf0a734429f53b38988904d5d5fc8e41d82b8 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 1 Aug 2018 15:06:42 -0400 Subject: [PATCH 074/329] update --- ann_class2/pytorch_batchnorm.py | 6 ------ ann_class2/pytorch_dropout.py | 6 ------ ann_class2/pytorch_example.py | 6 ------ ann_class2/pytorch_example2.py | 6 ------ 4 files changed, 24 deletions(-) diff --git a/ann_class2/pytorch_batchnorm.py b/ann_class2/pytorch_batchnorm.py index 748c179a..766dc805 100644 --- a/ann_class2/pytorch_batchnorm.py +++ b/ann_class2/pytorch_batchnorm.py @@ -5,12 +5,6 @@ # Note: you may need to update your version of future # sudo pip install -U future -# Linux and Mac instructions: -# http://pytorch.org/#pip-install-pytorch - -# Windows instructions (just one line): -# conda install -c peterjc123 pytorch - # Note: is helpful to look at keras_example.py first diff --git a/ann_class2/pytorch_dropout.py b/ann_class2/pytorch_dropout.py index 970e3130..58e76a49 100644 --- a/ann_class2/pytorch_dropout.py +++ b/ann_class2/pytorch_dropout.py @@ -5,12 +5,6 @@ # Note: you may need to update your version of future # sudo pip install -U future -# Linux and Mac instructions: -# http://pytorch.org/#pip-install-pytorch - -# Windows instructions (just one line): -# conda install -c peterjc123 pytorch - # Note: is helpful to look at keras_example.py first diff --git a/ann_class2/pytorch_example.py b/ann_class2/pytorch_example.py index f7a34f89..4b587690 100644 --- a/ann_class2/pytorch_example.py +++ b/ann_class2/pytorch_example.py @@ -5,12 +5,6 @@ # Note: you may need to update your version of future # sudo pip install -U future -# Linux and Mac instructions: -# http://pytorch.org/#pip-install-pytorch - -# Windows instructions (just one line): -# conda install -c peterjc123 pytorch - # Note: is helpful to look at keras_example.py first diff --git a/ann_class2/pytorch_example2.py b/ann_class2/pytorch_example2.py index 5620cba6..a58c45b2 100644 --- a/ann_class2/pytorch_example2.py +++ b/ann_class2/pytorch_example2.py @@ -5,12 +5,6 @@ # Note: you may need to update your version of future # sudo pip install -U future -# Linux and Mac instructions: -# http://pytorch.org/#pip-install-pytorch - -# Windows instructions (just one line): -# conda install -c peterjc123 pytorch - # Note: is helpful to look at keras_example.py first From 3a07285e06a0734c33537d96de960755502611d5 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 1 Aug 2018 16:08:01 -0400 Subject: [PATCH 075/329] update --- ann_class2/pytorch_example.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ann_class2/pytorch_example.py b/ann_class2/pytorch_example.py index 4b587690..53a26d48 100644 --- a/ann_class2/pytorch_example.py +++ b/ann_class2/pytorch_example.py @@ -64,10 +64,12 @@ # so we encapsulate it in a function # Note: inputs and labels are torch tensors def train(model, loss, optimizer, inputs, labels): + # 
https://discuss.pytorch.org/t/why-is-it-recommended-to-wrap-your-data-with-variable-each-step-of-the-iterations-rather-than-before-training-starts/12683 inputs = Variable(inputs, requires_grad=False) labels = Variable(labels, requires_grad=False) # Reset gradient + # https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/7 optimizer.zero_grad() # Forward @@ -81,6 +83,7 @@ def train(model, loss, optimizer, inputs, labels): optimizer.step() # what's the difference between backward() and step()? + # https://discuss.pytorch.org/t/what-does-the-backward-function-do/9944 return output.item() From 94c4328eb0dd1e8ff5aac9014fd6ec977d3945c0 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 3 Aug 2018 20:18:19 -0400 Subject: [PATCH 076/329] tune monte carlo es loop penalty: --- rl/monte_carlo_es.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/rl/monte_carlo_es.py b/rl/monte_carlo_es.py index b585bccf..79fca9f1 100644 --- a/rl/monte_carlo_es.py +++ b/rl/monte_carlo_es.py @@ -36,15 +36,22 @@ def play_game(grid, policy): # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) states_actions_rewards = [(s, a, 0)] seen_states = set() + seen_states.add(grid.current_state()) + num_steps = 0 while True: - old_s = grid.current_state() r = grid.move(a) + num_steps += 1 s = grid.current_state() if s in seen_states: # hack so that we don't end up in an infinitely long episode # bumping into the wall repeatedly - states_actions_rewards.append((s, None, -100)) + # if num_steps == 1 -> bumped into a wall and haven't moved anywhere + # reward = -10 + # else: + # reward = falls off by 1 / num_steps + reward = -10. / num_steps + states_actions_rewards.append((s, None, reward)) break elif grid.game_over(): states_actions_rewards.append((s, None, r)) From b8b97e32891b4be89a1adc1b316b8ba7142f951f Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 4 Aug 2018 14:56:08 -0400 Subject: [PATCH 077/329] change as_matrix to values --- ab_testing/client.py | 4 ++-- ann_logistic_extra/process.py | 2 +- cnn_class2/class_activation_maps.py | 7 ++----- cnn_class2/fashion.py | 2 +- cnn_class2/fashion2.py | 2 +- hmm_class/hmmd.py | 4 ++++ linear_regression_class/systolic.py | 2 +- nlp_class/nb.py | 2 +- nlp_class/spam2.py | 5 +++-- nlp_class3/bilstm_mnist.py | 2 +- supervised_class/bayes.py | 7 +++++++ supervised_class/util.py | 2 +- supervised_class2/rf_classification.py | 4 ++-- supervised_class2/rf_regression.py | 8 ++++---- unsupervised_class/kmeans_mnist.py | 4 ++-- unsupervised_class2/util.py | 2 +- 16 files changed, 34 insertions(+), 25 deletions(-) diff --git a/ab_testing/client.py b/ab_testing/client.py index 8cedc4d9..1b130447 100644 --- a/ab_testing/client.py +++ b/ab_testing/client.py @@ -16,8 +16,8 @@ df = pd.read_csv('advertisement_clicks.csv') a = df[df['advertisement_id'] == 'A'] b = df[df['advertisement_id'] == 'B'] -a = a['action'].as_matrix() -b = b['action'].as_matrix() +a = a['action'].values +b = b['action'].values print("a.mean:", a.mean()) print("b.mean:", b.mean()) diff --git a/ann_logistic_extra/process.py b/ann_logistic_extra/process.py index 568ba107..785755b7 100644 --- a/ann_logistic_extra/process.py +++ b/ann_logistic_extra/process.py @@ -21,7 +21,7 @@ def get_data(): # df.head() # easier to work with numpy array - data = df.as_matrix() + data = df.values # shuffle it np.random.shuffle(data) diff --git a/cnn_class2/class_activation_maps.py b/cnn_class2/class_activation_maps.py index 951a4fc5..e85ab504 100644 --- 
a/cnn_class2/class_activation_maps.py +++ b/cnn_class2/class_activation_maps.py @@ -6,14 +6,10 @@ # Note: you may need to update your version of future # sudo pip install -U future -from keras.layers import Input, Lambda, Dense, Flatten from keras.models import Model from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions -# from keras.applications.inception_v3 import InceptionV3, preprocess_input from keras.preprocessing import image -from keras.preprocessing.image import ImageDataGenerator -from sklearn.metrics import confusion_matrix import numpy as np import scipy as sp import matplotlib.pyplot as plt @@ -24,7 +20,7 @@ -# useful for getting number of files +# get the image files image_files = glob('../large_files/256_ObjectCategories/*/*.jp*g') image_files += glob('../large_files/101_ObjectCategories/*/*.jp*g') @@ -72,6 +68,7 @@ cam = fmaps.dot(w) # upsample to 224 x 224 + # 7 x 32 = 224 cam = sp.ndimage.zoom(cam, (32, 32), order=1) plt.subplot(1,2,1) diff --git a/cnn_class2/fashion.py b/cnn_class2/fashion.py index db845f8a..858be20e 100644 --- a/cnn_class2/fashion.py +++ b/cnn_class2/fashion.py @@ -26,7 +26,7 @@ def y2indicator(Y): # get the data # https://www.kaggle.com/zalando-research/fashionmnist data = pd.read_csv('../large_files/fashionmnist/fashion-mnist_train.csv') -data = data.as_matrix() +data = data.values np.random.shuffle(data) X = data[:, 1:].reshape(-1, 28, 28, 1) / 255.0 diff --git a/cnn_class2/fashion2.py b/cnn_class2/fashion2.py index 33231b66..4d2d22d6 100644 --- a/cnn_class2/fashion2.py +++ b/cnn_class2/fashion2.py @@ -26,7 +26,7 @@ def y2indicator(Y): # get the data # https://www.kaggle.com/zalando-research/fashionmnist data = pd.read_csv('../large_files/fashionmnist/fashion-mnist_train.csv') -data = data.as_matrix() +data = data.values np.random.shuffle(data) X = data[:, 1:].reshape(-1, 28, 28, 1) / 255.0 diff --git a/hmm_class/hmmd.py b/hmm_class/hmmd.py index 2da74796..d50d5832 100644 --- a/hmm_class/hmmd.py +++ b/hmm_class/hmmd.py @@ -10,6 +10,7 @@ import numpy as np import matplotlib.pyplot as plt +from datetime import datetime def random_normalized(d1, d2): @@ -22,6 +23,7 @@ def __init__(self, M): self.M = M # number of hidden states def fit(self, X, max_iter=30): + t0 = datetime.now() np.random.seed(123) # train the HMM model using the Baum-Welch algorithm # a specific instance of the expectation-maximization algorithm @@ -136,6 +138,8 @@ def fit(self, X, max_iter=30): print("B:", self.B) print("pi:", self.pi) + print("Fit duration:", (datetime.now() - t0)) + plt.plot(costs) plt.show() diff --git a/linear_regression_class/systolic.py b/linear_regression_class/systolic.py index fc71ee99..b7451837 100644 --- a/linear_regression_class/systolic.py +++ b/linear_regression_class/systolic.py @@ -20,7 +20,7 @@ import pandas as pd df = pd.read_excel('mlr02.xls') -X = df.as_matrix() +X = df.values # using age to predict systolic blood pressure plt.scatter(X[:,1], X[:,0]) diff --git a/nlp_class/nb.py b/nlp_class/nb.py index 2b16fc08..5d575915 100644 --- a/nlp_class/nb.py +++ b/nlp_class/nb.py @@ -18,7 +18,7 @@ # it will work for other types of "counts", like tf-idf, so it should # also work for our "word proportions" -data = pd.read_csv('spambase.data').as_matrix() # use pandas for convenience +data = pd.read_csv('spambase.data').values # use pandas for convenience np.random.shuffle(data) # shuffle each row in-place, but preserve the row X = data[:,:48] diff --git a/nlp_class/spam2.py b/nlp_class/spam2.py index 37d787cf..b5e069cc 100644 
--- a/nlp_class/spam2.py +++ b/nlp_class/spam2.py @@ -14,6 +14,7 @@ from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.model_selection import train_test_split from sklearn.naive_bayes import MultinomialNB +from sklearn.svm import SVC from wordcloud import WordCloud @@ -32,7 +33,7 @@ # create binary labels df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1}) -Y = df['b_labels'].as_matrix() +Y = df['b_labels'].values # try multiple ways of calculating features # tfidf = TfidfVectorizer(decode_error='ignore') @@ -49,7 +50,7 @@ model.fit(Xtrain, Ytrain) print("train score:", model.score(Xtrain, Ytrain)) print("test score:", model.score(Xtest, Ytest)) - +exit() # visualize the data diff --git a/nlp_class3/bilstm_mnist.py b/nlp_class3/bilstm_mnist.py index 9a79b546..4002b2ae 100644 --- a/nlp_class3/bilstm_mnist.py +++ b/nlp_class3/bilstm_mnist.py @@ -28,7 +28,7 @@ def get_mnist(limit=None): print("Reading in and transforming data...") df = pd.read_csv('../large_files/train.csv') - data = df.as_matrix() + data = df.values np.random.shuffle(data) X = data[:, 1:].reshape(-1, 28, 28) / 255.0 # data is from 0..255 Y = data[:, 0] diff --git a/supervised_class/bayes.py b/supervised_class/bayes.py index ce139051..ab3979cf 100644 --- a/supervised_class/bayes.py +++ b/supervised_class/bayes.py @@ -9,6 +9,7 @@ import numpy as np +import matplotlib.pyplot as plt from util import get_data from datetime import datetime from scipy.stats import norm @@ -60,3 +61,9 @@ def predict(self, X): t0 = datetime.now() print("Test accuracy:", model.score(Xtest, Ytest)) print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest)) + + # plot the mean of each class + for c, g in iteritems(model.gaussians): + plt.imshow(g['mean'].reshape(28, 28)) + plt.title(c) + plt.show() diff --git a/supervised_class/util.py b/supervised_class/util.py index cf041dc4..d733eaad 100644 --- a/supervised_class/util.py +++ b/supervised_class/util.py @@ -12,7 +12,7 @@ def get_data(limit=None): print("Reading in and transforming data...") df = pd.read_csv('../large_files/train.csv') - data = df.as_matrix() + data = df.values np.random.shuffle(data) X = data[:, 1:] / 255.0 # data is from 0..255 Y = data[:, 0] diff --git a/supervised_class2/rf_classification.py b/supervised_class2/rf_classification.py index 1991c7c3..6466f1b4 100644 --- a/supervised_class2/rf_classification.py +++ b/supervised_class2/rf_classification.py @@ -55,7 +55,7 @@ def transform(self, df): X = np.zeros((N, self.D)) i = 0 for col, scaler in iteritems(self.scalers): - X[:,i] = scaler.transform(df[col].as_matrix().reshape(-1, 1)).flatten() + X[:,i] = scaler.transform(df[col].values.reshape(-1, 1)).flatten() i += 1 for col, encoder in iteritems(self.labelEncoders): @@ -98,7 +98,7 @@ def get_data(): transformer = DataTransformer() X = transformer.fit_transform(df) - Y = df[0].as_matrix() + Y = df[0].values return X, Y diff --git a/supervised_class2/rf_regression.py b/supervised_class2/rf_regression.py index 37ffb012..2b219a34 100644 --- a/supervised_class2/rf_regression.py +++ b/supervised_class2/rf_regression.py @@ -44,7 +44,7 @@ def fit(self, df): self.scalers = {} for col in NUMERICAL_COLS: scaler = StandardScaler() - scaler.fit(df[col].as_matrix().reshape(-1, 1)) + scaler.fit(df[col].values.reshape(-1, 1)) self.scalers[col] = scaler def transform(self, df): @@ -53,7 +53,7 @@ def transform(self, df): X = np.zeros((N, D)) i = 0 for col, scaler in iteritems(self.scalers): - X[:,i] = 
scaler.transform(df[col].as_matrix().reshape(-1, 1)).flatten() + X[:,i] = scaler.transform(df[col].values.reshape(-1, 1)).flatten() i += 1 for col in NO_TRANSFORM: X[:,i] = df[col] @@ -96,9 +96,9 @@ def get_data(): df_test = df.loc[test_idx] Xtrain = transformer.fit_transform(df_train) - Ytrain = np.log(df_train['medv'].as_matrix()) + Ytrain = np.log(df_train['medv'].values) Xtest = transformer.transform(df_test) - Ytest = np.log(df_test['medv'].as_matrix()) + Ytest = np.log(df_test['medv'].values) return Xtrain, Ytrain, Xtest, Ytest diff --git a/unsupervised_class/kmeans_mnist.py b/unsupervised_class/kmeans_mnist.py index 3fa0293e..fd7ca76e 100644 --- a/unsupervised_class/kmeans_mnist.py +++ b/unsupervised_class/kmeans_mnist.py @@ -16,13 +16,13 @@ import numpy as np import pandas as pd import matplotlib.pyplot as plt -from .kmeans import plot_k_means, get_simple_data +from kmeans import plot_k_means, get_simple_data from datetime import datetime def get_data(limit=None): print("Reading in and transforming data...") df = pd.read_csv('../large_files/train.csv') - data = df.as_matrix() + data = df.values np.random.shuffle(data) X = data[:, 1:] / 255.0 # data is from 0..255 Y = data[:, 0] diff --git a/unsupervised_class2/util.py b/unsupervised_class2/util.py index 3b4f2e4c..24ed683e 100644 --- a/unsupervised_class2/util.py +++ b/unsupervised_class2/util.py @@ -23,7 +23,7 @@ def getKaggleMNIST(): # column 0 is labels # column 1-785 is data, with values 0 .. 255 # total size of CSV: (42000, 1, 28, 28) - train = pd.read_csv('../large_files/train.csv').as_matrix().astype(np.float32) + train = pd.read_csv('../large_files/train.csv').values.astype(np.float32) train = shuffle(train) Xtrain = train[:-1000,1:] / 255 From 9126ec70a24d034731d442839c17d8494e3176e4 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 5 Aug 2018 16:30:21 -0400 Subject: [PATCH 078/329] update --- ann_class/sklearn_ann.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/ann_class/sklearn_ann.py b/ann_class/sklearn_ann.py index 9c3ce2b4..5c4e5fa0 100644 --- a/ann_class/sklearn_ann.py +++ b/ann_class/sklearn_ann.py @@ -17,13 +17,7 @@ from sklearn.utils import shuffle # get the data -X, Y = get_data() - -# split into train and test -X, Y = shuffle(X, Y) -Ntrain = int(0.7*len(X)) -Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain] -Xtest, Ytest = X[Ntrain:], Y[Ntrain:] +Xtrain, Ytrain, Xtest, Ytest = get_data() # create the neural network model = MLPClassifier(hidden_layer_sizes=(20, 20), max_iter=2000) From 15caf3f380698598b7fecbb44b23f8b263752de7 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 7 Aug 2018 22:50:20 -0400 Subject: [PATCH 079/329] update --- cnn_class2/class_activation_maps.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cnn_class2/class_activation_maps.py b/cnn_class2/class_activation_maps.py index e85ab504..19033ff5 100644 --- a/cnn_class2/class_activation_maps.py +++ b/cnn_class2/class_activation_maps.py @@ -16,11 +16,11 @@ from glob import glob -from skimage.transform import rescale, resize - # get the image files +# http://www.vision.caltech.edu/Image_Datasets/Caltech101/ +# http://www.vision.caltech.edu/Image_Datasets/Caltech256/ image_files = glob('../large_files/256_ObjectCategories/*/*.jp*g') image_files += glob('../large_files/101_ObjectCategories/*/*.jp*g') From a12a70ba2de874ace055cdf1193a2d3fd7a27158 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 7 Aug 2018 23:52:14 -0400 Subject: [PATCH 080/329] umap --- unsupervised_class2/umap_transformer.py | 40 
+++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 unsupervised_class2/umap_transformer.py diff --git a/unsupervised_class2/umap_transformer.py b/unsupervised_class2/umap_transformer.py new file mode 100644 index 00000000..51e1a86b --- /dev/null +++ b/unsupervised_class2/umap_transformer.py @@ -0,0 +1,40 @@ +# https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python +# https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from datetime import datetime +from util import getKaggleMNIST +from sklearn.linear_model import LogisticRegression +from umap import UMAP + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +print("Score without transformation:") +model = LogisticRegression() +model.fit(Xtrain, Ytrain) +print(model.score(Xtrain, Ytrain)) +print(model.score(Xtest, Ytest)) + + +umapper = UMAP(n_neighbors=5, n_components=10) +t0 = datetime.now() +Ztrain = umapper.fit_transform(Xtrain) +print("umap fit_transform took:", datetime.now() - t0) +t0 = datetime.now() +Ztest = umapper.transform(Xtest) +print("umap transform took:", datetime.now() - t0) + +print("Score with transformation") +model = LogisticRegression() +t0 = datetime.now() +model.fit(Ztrain, Ytrain) +print("logistic regression fit took:", datetime.now() - t0) +print(model.score(Ztrain, Ytrain)) +print(model.score(Ztest, Ytest)) \ No newline at end of file From 0fab6af445d22b530ab9efd557cbd2fbbf28e099 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Wed, 8 Aug 2018 13:30:40 -0400 Subject: [PATCH 081/329] remove y2i --- cnn_class/cnn_theano.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cnn_class/cnn_theano.py b/cnn_class/cnn_theano.py index 21565fab..311577d6 100644 --- a/cnn_class/cnn_theano.py +++ b/cnn_class/cnn_theano.py @@ -18,7 +18,7 @@ from datetime import datetime -from benchmark import get_data, y2indicator, error_rate +from benchmark import get_data, error_rate def relu(a): From f6d624e45fdd4c4c8c456f7f3def57021738ff74 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 23 Aug 2018 14:44:17 -0400 Subject: [PATCH 082/329] update --- nlp_class3/cnn_toxic.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nlp_class3/cnn_toxic.py b/nlp_class3/cnn_toxic.py index 1cc5b48a..5ecf0443 100644 --- a/nlp_class3/cnn_toxic.py +++ b/nlp_class3/cnn_toxic.py @@ -56,12 +56,6 @@ possible_labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] targets = train[possible_labels].values -print("max sequence length:", max(len(s) for s in sentences)) -print("min sequence length:", min(len(s) for s in sentences)) -s = sorted(len(s) for s in sentences) -print("median sequence length:", s[len(s) // 2]) - - # convert the sentences (strings) into integers @@ -71,6 +65,12 @@ # print("sequences:", sequences); exit() +print("max sequence length:", max(len(s) for s in sequences)) +print("min sequence length:", min(len(s) for s in sequences)) +s = sorted(len(s) for s in sequences) +print("median sequence length:", s[len(s) // 2]) + + # get word -> integer mapping word2idx = tokenizer.word_index print('Found %s unique tokens.' 
% len(word2idx)) From ff779715c5ea37f88cc39a6729747ce3cbc705df Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 25 Aug 2018 22:12:25 -0400 Subject: [PATCH 083/329] keras examples --- keras_examples/ann.py | 71 +++++++++++ keras_examples/basic_mlp.py | 44 +++++++ keras_examples/batchnorm.py | 75 +++++++++++ keras_examples/cnn.py | 81 ++++++++++++ keras_examples/cnn_cifar.py | 91 +++++++++++++ keras_examples/cnn_dropout_batchnorm.py | 84 ++++++++++++ keras_examples/dropout.py | 74 +++++++++++ keras_examples/sentiment_analysis.py | 107 ++++++++++++++++ keras_examples/sine.py | 79 ++++++++++++ keras_examples/sine2.py | 80 ++++++++++++ keras_examples/translation.py | 162 ++++++++++++++++++++++++ keras_examples/util.py | 74 +++++++++++ 12 files changed, 1022 insertions(+) create mode 100644 keras_examples/ann.py create mode 100644 keras_examples/basic_mlp.py create mode 100644 keras_examples/batchnorm.py create mode 100644 keras_examples/cnn.py create mode 100644 keras_examples/cnn_cifar.py create mode 100644 keras_examples/cnn_dropout_batchnorm.py create mode 100644 keras_examples/dropout.py create mode 100644 keras_examples/sentiment_analysis.py create mode 100644 keras_examples/sine.py create mode 100644 keras_examples/sine2.py create mode 100644 keras_examples/translation.py create mode 100644 keras_examples/util.py diff --git a/keras_examples/ann.py b/keras_examples/ann.py new file mode 100644 index 00000000..08636b15 --- /dev/null +++ b/keras_examples/ann.py @@ -0,0 +1,71 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from util import getKaggleMNIST +from keras.models import Model +from keras.layers import Dense, Activation, Input + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# get shapes +N, D = Xtrain.shape +K = len(set(Ytrain)) + + +# ANN with layers [784] -> [500] -> [300] -> [10] +i = Input(shape=(D,)) +x = Dense(500, activation='relu')(i) +x = Dense(300, activation='relu')(x) +x = Dense(K, activation='softmax')(x) + +# instantiate the model object +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + +# make predictions and evaluate +probs = model.predict(Xtest) # N x K matrix of probabilities +Ptest = np.argmax(probs, axis=1) +print("Validation acc:", np.mean(Ptest == Ytest)) + diff --git a/keras_examples/basic_mlp.py b/keras_examples/basic_mlp.py new file mode 100644 index 00000000..8c79533f --- /dev/null +++ b/keras_examples/basic_mlp.py @@ -0,0 +1,44 @@ +from __future__ import print_function, division +from builtins import 
range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from util import getKaggleMNIST, getKaggleFashionMNIST +from sklearn.neural_network import MLPClassifier + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleFashionMNIST() + +# inspect your data +print(Xtrain.shape) +print(Ytrain.shape) + +# look at an example +i = np.random.choice(Xtrain.shape[0]) +plt.imshow(Xtrain[i].reshape(28, 28)) +plt.title(Ytrain[i]) +plt.show() + +# instantiate the model +model = MLPClassifier() + +# train the model +model.fit(Xtrain, Ytrain) + +# evaluate the model +print(model.score(Xtrain, Ytrain)) +print(model.score(Xtest, Ytest)) + +# for completion's sake, this is how you make predictions +Ptest = model.predict(Xtest) + +# an alternate way to calculate accuracy +print(np.mean(Ptest == Ytest)) + +# get output probabilities +probs = model.predict_proba(Xtest) +print("np.argmax(probs, axis=1) == Ptest?", np.all(np.argmax(probs, axis=1) == Ptest)) \ No newline at end of file diff --git a/keras_examples/batchnorm.py b/keras_examples/batchnorm.py new file mode 100644 index 00000000..8b041729 --- /dev/null +++ b/keras_examples/batchnorm.py @@ -0,0 +1,75 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from util import getKaggleMNIST +from keras.models import Model +from keras.layers import Dense, Activation, Input, BatchNormalization + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# get shapes +N, D = Xtrain.shape +K = len(set(Ytrain)) + + +# ANN with layers [784] -> [500] -> [300] -> [10] +i = Input(shape=(D,)) +x = Dense(500)(i) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = Dense(300)(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = Dense(K, activation='softmax')(x) + +# instantiate the model object +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + +# make predictions and evaluate +probs = model.predict(Xtest) # N x K matrix of probabilities +Ptest = np.argmax(probs, axis=1) +print("Validation acc:", np.mean(Ptest == Ytest)) + diff --git a/keras_examples/cnn.py b/keras_examples/cnn.py new file mode 100644 index 00000000..088cc5b2 --- /dev/null +++ b/keras_examples/cnn.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# 
Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Model +from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Input + +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + +from util import getKaggleMNIST3D, getKaggleFashionMNIST3D, getCIFAR10 + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleFashionMNIST3D() + +# get shapes +N, H, W, C = Xtrain.shape +K = len(set(Ytrain)) + + + + +# make the CNN +i = Input(shape=(H, W, C)) +x = Conv2D(filters=32, kernel_size=(3, 3))(i) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=64, kernel_size=(3, 3))(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dense(units=100)(x) +x = Activation('relu')(x) +x = Dense(units=K)(x) +x = Activation('softmax')(x) + +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + diff --git a/keras_examples/cnn_cifar.py b/keras_examples/cnn_cifar.py new file mode 100644 index 00000000..4bf16cda --- /dev/null +++ b/keras_examples/cnn_cifar.py @@ -0,0 +1,91 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Sequential, Model +from keras.layers import Dense, Activation, Lambda, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Input + +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + +from util import getCIFAR10 + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getCIFAR10() + +# get shapes +N, H, W, C = Xtrain.shape +K = len(set(Ytrain)) + + + + +# make the CNN +i = Input(shape=(H, W, C)) +x = Lambda(lambda x: (x - 127.5) / 127.5)(i) +x = Conv2D(filters=32, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=64, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=128, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dropout(0.5)(x) +x = Dense(units=300)(x) +x = Activation('relu')(x) +x = Dropout(0.2)(x) +x = Dense(units=K)(x) +x = Activation('softmax')(x) + +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + 
loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=80, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + diff --git a/keras_examples/cnn_dropout_batchnorm.py b/keras_examples/cnn_dropout_batchnorm.py new file mode 100644 index 00000000..f89cd37d --- /dev/null +++ b/keras_examples/cnn_dropout_batchnorm.py @@ -0,0 +1,84 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Sequential, Model +from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Input + +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + +from util import getKaggleMNIST3D, getKaggleFashionMNIST3D, getCIFAR10 + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleFashionMNIST3D() + +# get shapes +N, H, W, C = Xtrain.shape +K = len(set(Ytrain)) + + + + +# make the CNN +i = Input(shape=(H, W, C)) +x = Conv2D(filters=32, kernel_size=(3, 3))(i) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=64, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dense(units=100)(x) +x = Activation('relu')(x) +x = Dropout(0.2)(x) +x = Dense(units=K)(x) +x = Activation('softmax')(x) + +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + diff --git a/keras_examples/dropout.py b/keras_examples/dropout.py new file mode 100644 index 00000000..0b027f0c --- /dev/null +++ b/keras_examples/dropout.py @@ -0,0 +1,74 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from util import getKaggleMNIST +from keras.models import 
Model +from keras.layers import Dense, Activation, Input, Dropout + + +# get the data +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# get shapes +N, D = Xtrain.shape +K = len(set(Ytrain)) + + +# ANN with layers [784] -> [500] -> [300] -> [10] +i = Input(shape=(D,)) +x = Dropout(0.2)(i) +x = Dense(500, activation='relu')(x) +x = Dropout(0.5)(x) +x = Dense(300, activation='relu')(x) +x = Dropout(0.5)(x) +x = Dense(K, activation='softmax')(x) + +# instantiate the model object +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + +# make predictions and evaluate +probs = model.predict(Xtest) # N x K matrix of probabilities +Ptest = np.argmax(probs, axis=1) +print("Validation acc:", np.mean(Ptest == Ytest)) + diff --git a/keras_examples/sentiment_analysis.py b/keras_examples/sentiment_analysis.py new file mode 100644 index 00000000..96885004 --- /dev/null +++ b/keras_examples/sentiment_analysis.py @@ -0,0 +1,107 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +import sys +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from keras.models import Model +from keras.layers import Dense, Embedding, Input +from keras.layers import LSTM, GRU +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences + +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + + + +# some configuration +MAX_SEQUENCE_LENGTH = 50 +MAX_VOCAB_SIZE = 20000 +EMBEDDING_DIM = 10 +VALIDATION_SPLIT = 0.2 +BATCH_SIZE = 128 +EPOCHS = 5 + + + +# get the data at: https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews +# prepare text samples and their labels +print('Loading in data...') +train = pd.read_csv("../large_files/kaggle-sentiment-analysis/train.tsv", sep='\t') +sentences = train["Phrase"].values +targets = (train['Sentiment'].values > 3) +K = len(set(targets)) + + + + +# convert the sentences (strings) into integers +tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE) +tokenizer.fit_on_texts(sentences) +sequences = tokenizer.texts_to_sequences(sentences) + +print("max sequence length:", max(len(s) for s in sequences)) +print("min sequence length:", min(len(s) for s in sequences)) +s = sorted(len(s) for s in sequences) +print("median sequence length:", s[len(s) // 2]) + +maxlen = min(max(len(s) for s in sequences), MAX_SEQUENCE_LENGTH) + + + +# get word -> integer mapping +word2idx = 
tokenizer.word_index +print('Found %s unique tokens.' % len(word2idx)) + + +# pad sequences so that we get a N x T matrix +data = pad_sequences(sequences, maxlen=maxlen) +print('Shape of data tensor:', data.shape) + + +print('Building model...') + +# create an LSTM network with a single LSTM +input_ = Input(shape=(maxlen,)) +x = Embedding(len(word2idx) + 1, EMBEDDING_DIM)(input_) +x = LSTM(5)(x) +output = Dense(K, activation='softmax')(x) + +model = Model(input_, output) +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + + +print('Training model...') +r = model.fit( + data, + targets, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=VALIDATION_SPLIT +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() diff --git a/keras_examples/sine.py b/keras_examples/sine.py new file mode 100644 index 00000000..87580986 --- /dev/null +++ b/keras_examples/sine.py @@ -0,0 +1,79 @@ +# https://lazyprogrammer.me +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from keras.models import Model +from keras.layers import Input, LSTM, GRU, SimpleRNN, Dense +import keras.backend as K + +from keras.optimizers import SGD, Adam + + +# make the original data +series = np.sin(0.1*np.arange(200)) + np.random.randn(200)*0.1 + +# plot it +plt.plot(series) +plt.show() + + +### build the dataset +# let's see if we can use T past values to predict the next value +T = 10 +D = 1 +X = [] +Y = [] +for t in range(len(series) - T - 1): + x = series[t:t+T] + # print("x[-1]:", x[-1]) + X.append(x) + y = series[t+T] + # print("y:", y) + Y.append(y) + +X = np.array(X) +Y = np.array(Y) +N = len(X) + + + +### many-to-one RNN +inputs = np.expand_dims(X, -1) + +# make the RNN +i = Input(shape=(T, D)) +x = SimpleRNN(5)(i) +x = Dense(1)(x) +model = Model(i, x) +model.compile( + loss='mse', + optimizer=Adam(lr=0.1), +) + +# train the RNN +r = model.fit( + inputs[:-N//2], Y[:-N//2], + batch_size=32, + epochs=80, + validation_data=(inputs[-N//2:], Y[-N//2:]), +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + + +# plot predictions vs targets +outputs = model.predict(inputs) +print(outputs.shape) +predictions = outputs[:,0] + +plt.plot(Y, label='targets') +plt.plot(predictions, label='predictions') +plt.title("many-to-one RNN") +plt.legend() +plt.show() + diff --git a/keras_examples/sine2.py b/keras_examples/sine2.py new file mode 100644 index 00000000..cb6cc1a7 --- /dev/null +++ b/keras_examples/sine2.py @@ -0,0 +1,80 @@ +# https://lazyprogrammer.me +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from keras.models import Model +from keras.layers import Input, SimpleRNN, Dense +from keras.optimizers import SGD, Adam + + +# make the original data +series1 = np.sin(0.1*np.arange(200)) +series2 = np.sin(0.2*np.arange(200)) + +# plot it +plt.plot(series1) +plt.plot(series2) +plt.show() + + +### build the dataset +# let's see if we can use T past values to predict the next value +T = 10 +D = 2 +X = [] +Y = [] +for t in range(len(series1) - T - 1): + x = [series1[t:t+T], series2[t:t+T]] + # print("x[-1]:", x[-1]) + X.append(x) + y = series1[t+T] + series2[t+T] + # print("y:", y) + Y.append(y) + +X = np.array(X) 
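+# note: X currently has shape N x D x T, since each sample was built as [series1 window, series2 window];
+# the transpose below rearranges it to N x T x D to match the Input(shape=(T, D)) expected by the RNN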
+print("X.shape:", X.shape) +X = np.transpose(X, (0, 2, 1)) +Y = np.array(Y) +N = len(X) + + + +### many-to-one RNN + +# make the RNN +i = Input(shape=(T, D)) +x = SimpleRNN(5)(i) +x = Dense(1)(x) +model = Model(i, x) +model.compile( + loss='mse', + optimizer=Adam(lr=0.1), +) + +# train the RNN +r = model.fit( + X[:-N//2], Y[:-N//2], + batch_size=32, + epochs=80, + validation_data=(X[-N//2:], Y[-N//2:]), +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + + +# plot predictions vs targets +outputs = model.predict(X) +print(outputs.shape) +predictions = outputs[:,0] + +plt.plot(Y, label='targets') +plt.plot(predictions, label='predictions') +plt.title("many-to-one RNN") +plt.legend() +plt.show() + diff --git a/keras_examples/translation.py b/keras_examples/translation.py new file mode 100644 index 00000000..2449f2f7 --- /dev/null +++ b/keras_examples/translation.py @@ -0,0 +1,162 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +import sys +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from keras.models import Model +from keras.layers import Dense, Embedding, Input +from keras.layers import LSTM, Bidirectional +from keras.preprocessing.text import Tokenizer +from keras.preprocessing.sequence import pad_sequences +from keras.optimizers import Adam + +import keras.backend as K +if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU + + + + +# some configuration +MAX_VOCAB_SIZE = 20000 +EMBEDDING_DIM = 20 +VALIDATION_SPLIT = 0.2 +BATCH_SIZE = 128 +EPOCHS = 30 +NUM_SAMPLES = 10000 + + + +# Where we will store the data +input_texts = [] # sentence in original language +target_texts = [] # sentence in target language + + +# load in the data +# download the data at: http://www.manythings.org/anki/ +t = 0 +for line in open('../large_files/translation/spa.txt'): + # only keep a limited number of samples + t += 1 + if t > NUM_SAMPLES: + break + + # input and target are separated by tab + if '\t' not in line: + continue + + # split up the input and translation + input_text, translation = line.rstrip().split('\t') + + input_texts.append(input_text) + target_texts.append(translation) +print("num samples:", len(input_texts)) + + + +# tokenize the inputs +tokenizer_inputs = Tokenizer(num_words=MAX_VOCAB_SIZE) +tokenizer_inputs.fit_on_texts(input_texts) +input_sequences = tokenizer_inputs.texts_to_sequences(input_texts) + +# get the word to index mapping for input language +word2idx_inputs = tokenizer_inputs.word_index +print('Found %s unique input tokens.' % len(word2idx_inputs)) +num_words_input = len(word2idx_inputs) + 1 + +# determine maximum length input sequence +max_len_input = max(len(s) for s in input_sequences) + +# tokenize the outputs +# don't filter out special characters +# otherwise and won't appear +tokenizer_outputs = Tokenizer(num_words=MAX_VOCAB_SIZE, filters='') +tokenizer_outputs.fit_on_texts(target_texts) +target_sequences = tokenizer_outputs.texts_to_sequences(target_texts) + +# get the word to index mapping for output language +word2idx_outputs = tokenizer_outputs.word_index +print('Found %s unique output tokens.' 
% len(word2idx_outputs)) + +# store number of output words for later +# remember to add 1 since indexing starts at 1 +num_words_output = len(word2idx_outputs) + 1 + +# determine maximum length output sequence +max_len_target = max(len(s) for s in target_sequences) + + +max_len_both = max(max_len_input, max_len_target) + + + +# pad the sequences +inputs_padded = pad_sequences(input_sequences, maxlen=max_len_both) +targets_padded = pad_sequences(target_sequences, maxlen=max_len_both) + + + +# create targets, since we cannot use sparse +# categorical cross entropy when we have sequences +targets_padded_one_hot = np.zeros( + ( + len(targets_padded), + max_len_both, + num_words_output + ), + dtype='float32' +) + +# assign the values +for i, d in enumerate(targets_padded): + for t, word in enumerate(d): + targets_padded_one_hot[i, t, word] = 1 + + + + +print('Building model...') + +# create an LSTM network with a single LSTM +input_ = Input(shape=(max_len_both,)) +x = Embedding(num_words_input, EMBEDDING_DIM)(input_) +x = Bidirectional(LSTM(15, return_sequences=True))(x) +output = Dense(num_words_output, activation='softmax')(x) + +model = Model(input_, output) +model.compile( + loss='categorical_crossentropy', + optimizer=Adam(lr=0.1), + metrics=['accuracy'] +) + + +print('Training model...') +r = model.fit( + inputs_padded, + targets_padded_one_hot, + batch_size=BATCH_SIZE, + epochs=EPOCHS, + validation_split=VALIDATION_SPLIT +) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() diff --git a/keras_examples/util.py b/keras_examples/util.py new file mode 100644 index 00000000..2e3af106 --- /dev/null +++ b/keras_examples/util.py @@ -0,0 +1,74 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd + +from sklearn.utils import shuffle +from scipy.io import loadmat + + +def getKaggleMNIST(): + # https://www.kaggle.com/c/digit-recognizer + return getMNISTFormat('../large_files/train.csv') + + +def getKaggleFashionMNIST(): + # https://www.kaggle.com/zalando-research/fashionmnist + return getMNISTFormat('../large_files/fashionmnist/fashion-mnist_train.csv') + +def getMNISTFormat(path): + # MNIST data: + # column 0 is labels + # column 1-785 is data, with values 0 .. 
255 + # total size of CSV: (42000, 1, 28, 28) + train = pd.read_csv(path).values.astype(np.float32) + train = shuffle(train) + + Xtrain = train[:-1000,1:] / 255.0 + Ytrain = train[:-1000,0].astype(np.int32) + + Xtest = train[-1000:,1:] / 255.0 + Ytest = train[-1000:,0].astype(np.int32) + return Xtrain, Ytrain, Xtest, Ytest + +def getKaggleMNIST3D(): + Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + Xtrain = Xtrain.reshape(-1, 28, 28, 1) + Xtest = Xtest.reshape(-1, 28, 28, 1) + return Xtrain, Ytrain, Xtest, Ytest + +def getKaggleFashionMNIST3D(): + Xtrain, Ytrain, Xtest, Ytest = getKaggleFashionMNIST() + Xtrain = Xtrain.reshape(-1, 28, 28, 1) + Xtest = Xtest.reshape(-1, 28, 28, 1) + return Xtrain, Ytrain, Xtest, Ytest + +def getCIFAR10(): + Xtrain = np.zeros((50000, 32, 32, 3), dtype=np.uint8) + Ytrain = np.zeros(50000, dtype=np.uint8) + + # train data + for i in range(5): + fn = 'data_batch_%s.mat' % (i+1) + d = loadmat('../large_files/cifar-10-batches-mat/' + fn) + x = d['data'] + y = d['labels'].flatten() + x = x.reshape(10000, 3, 32, 32) + x = np.transpose(x, (0, 2, 3, 1)) + Xtrain[i*10000:(i+1)*10000] = x + Ytrain[i*10000:(i+1)*10000] = y + + # test data + d = loadmat('../large_files/cifar-10-batches-mat/test_batch.mat') + x = d['data'] + y = d['labels'].flatten() + x = x.reshape(10000, 3, 32, 32) + x = np.transpose(x, (0, 2, 3, 1)) + Xtest = x + Ytest = y + + return Xtrain, Ytrain, Xtest, Ytest + From 5badbf36e47f52f9d1d53688eb4766e67a64aa79 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 27 Aug 2018 12:34:52 -0400 Subject: [PATCH 084/329] update --- nlp_class2/pmi.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/nlp_class2/pmi.py b/nlp_class2/pmi.py index 28690a12..b321e91f 100644 --- a/nlp_class2/pmi.py +++ b/nlp_class2/pmi.py @@ -152,9 +152,13 @@ def remove_punctuation_3(s): # PMI(w, c) = #(w, c) / #(w) / p(c) -pmi = wc_counts / wc_counts.sum(axis=1) / c_probs +# pmi = wc_counts / wc_counts.sum(axis=1) / c_probs # works only if numpy arrays +pmi = wc_counts.multiply(1.0 / wc_counts.sum(axis=1) / c_probs).tocsr() +# this operation changes it to a coo_matrix +# which doesn't have functions we need, e.g log1p() +# so convert it back to a csr print("type(pmi):", type(pmi)) -logX = np.log(pmi.A + 1) +logX = pmi.log1p() # would be logX = np.log(pmi.A + 1) in numpy print("type(logX):", type(logX)) logX[logX < 0] = 0 @@ -180,7 +184,9 @@ def remove_punctuation_3(s): for epoch in range(10): print("epoch:", epoch) delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX - cost = ( delta * delta ).sum() + # cost = ( delta * delta ).sum() + cost = np.multiply(delta, delta).sum() + # * behaves differently if delta is a "matrix" object vs "array" object costs.append(cost) ### partially vectorized updates ### From 994c7fca97ec4246053b49441034c733db0fd3ee Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 12 Sep 2018 14:10:59 -0400 Subject: [PATCH 085/329] recommenders --- recommenders/autorec.py | 126 ++++++++++++++++++++++ recommenders/extra_reading.txt | 53 +++++++++ recommenders/itembased.py | 171 ++++++++++++++++++++++++++++++ recommenders/mf.py | 146 +++++++++++++++++++++++++ recommenders/mf2.py | 159 +++++++++++++++++++++++++++ recommenders/mf_keras.py | 100 +++++++++++++++++ recommenders/mf_keras_deep.py | 89 ++++++++++++++++ recommenders/mf_keras_res.py | 98 +++++++++++++++++ recommenders/preprocess.py | 41 +++++++ recommenders/preprocess2dict.py | 82 ++++++++++++++ recommenders/preprocess2sparse.py | 62 +++++++++++ recommenders/preprocess_shrink.py | 
58 ++++++++++ recommenders/userbased.py | 168 +++++++++++++++++++++++++++++ 13 files changed, 1353 insertions(+) create mode 100644 recommenders/autorec.py create mode 100644 recommenders/extra_reading.txt create mode 100644 recommenders/itembased.py create mode 100644 recommenders/mf.py create mode 100644 recommenders/mf2.py create mode 100644 recommenders/mf_keras.py create mode 100644 recommenders/mf_keras_deep.py create mode 100644 recommenders/mf_keras_res.py create mode 100644 recommenders/preprocess.py create mode 100644 recommenders/preprocess2dict.py create mode 100644 recommenders/preprocess2sparse.py create mode 100644 recommenders/preprocess_shrink.py create mode 100644 recommenders/userbased.py diff --git a/recommenders/autorec.py b/recommenders/autorec.py new file mode 100644 index 00000000..02ff05b9 --- /dev/null +++ b/recommenders/autorec.py @@ -0,0 +1,126 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle +from scipy.sparse import save_npz, load_npz + +import keras.backend as K +from keras.models import Model +from keras.layers import Input, Dropout, Dense +from keras.regularizers import l2 +from keras.optimizers import SGD + +# config +batch_size = 128 +epochs = 20 +reg = 0.0001 +# reg = 0 + +A = load_npz("Atrain.npz") +A_test = load_npz("Atest.npz") +mask = (A > 0) * 1.0 +mask_test = (A_test > 0) * 1.0 + +# make copies since we will shuffle +A_copy = A.copy() +mask_copy = mask.copy() +A_test_copy = A_test.copy() +mask_test_copy = mask_test.copy() + +N, M = A.shape +print("N:", N, "M:", M) +print("N // batch_size:", N // batch_size) + +# center the data +mu = A.sum() / mask.sum() +print("mu:", mu) + + + +# build the model - just a 1 hidden layer autoencoder +i = Input(shape=(M,)) +# bigger hidden layer size seems to help! +x = Dropout(0.7)(i) +x = Dense(700, activation='tanh', kernel_regularizer=l2(reg))(x) +# x = Dropout(0.5)(x) +x = Dense(M, kernel_regularizer=l2(reg))(x) + + + +def custom_loss(y_true, y_pred): + mask = K.cast(K.not_equal(y_true, 0), dtype='float32') + diff = y_pred - y_true + sqdiff = diff * diff * mask + sse = K.sum(K.sum(sqdiff)) + n = K.sum(K.sum(mask)) + return sse / n + + +def generator(A, M): + while True: + A, M = shuffle(A, M) + for i in range(A.shape[0] // batch_size + 1): + upper = min((i+1)*batch_size, A.shape[0]) + a = A[i*batch_size:upper].toarray() + m = M[i*batch_size:upper].toarray() + a = a - mu * m # must keep zeros at zero! 
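+      # mu is the global mean of the observed ratings; scaling it by the
+      # mask m centers only the rated entries and leaves missing ratings at 0,
+      # which matters because custom_loss rebuilds its mask from the nonzero
+      # entries of y_true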
+ # m2 = (np.random.random(a.shape) > 0.5) + # noisy = a * m2 + noisy = a # no noise + yield noisy, a + + +def test_generator(A, M, A_test, M_test): + # assumes A and A_test are in corresponding order + # both of size N x M + while True: + for i in range(A.shape[0] // batch_size + 1): + upper = min((i+1)*batch_size, A.shape[0]) + a = A[i*batch_size:upper].toarray() + m = M[i*batch_size:upper].toarray() + at = A_test[i*batch_size:upper].toarray() + mt = M_test[i*batch_size:upper].toarray() + a = a - mu * m + at = at - mu * mt + yield a, at + + + +model = Model(i, x) +model.compile( + loss=custom_loss, + optimizer=SGD(lr=0.08, momentum=0.9), + # optimizer='adam', + metrics=[custom_loss], +) + + +r = model.fit_generator( + generator(A, mask), + validation_data=test_generator(A_copy, mask_copy, A_test_copy, mask_test_copy), + epochs=epochs, + steps_per_epoch=A.shape[0] // batch_size + 1, + validation_steps=A_test.shape[0] // batch_size + 1, +) +print(r.history.keys()) + + + +# plot losses +plt.plot(r.history['loss'], label="train loss") +plt.plot(r.history['val_loss'], label="test loss") +plt.legend() +plt.show() + +# plot mse +plt.plot(r.history['custom_loss'], label="train mse") +plt.plot(r.history['val_custom_loss'], label="test mse") +plt.legend() +plt.show() diff --git a/recommenders/extra_reading.txt b/recommenders/extra_reading.txt new file mode 100644 index 00000000..fc0267bb --- /dev/null +++ b/recommenders/extra_reading.txt @@ -0,0 +1,53 @@ +How Hacker News ranking really works: scoring, controversy, and penalties +http://www.righto.com/2013/11/how-hacker-news-ranking-really-works.html + +The Evolution Of Hacker News +https://techcrunch.com/2013/05/18/the-evolution-of-hacker-news/ + +Reddit sorting code +https://github.com/reddit-archive/reddit/blob/master/r2/r2/lib/db/_sorts.pyx + +Revealed: US spy operation that manipulates social media +https://www.theguardian.com/technology/2011/mar/17/us-spy-operation-social-networks + +Learning to rank +https://en.wikipedia.org/wiki/Learning_to_rank#Evaluation_measures + +How Not To Sort By Average Rating +https://www.evanmiller.org/how-not-to-sort-by-average-rating.html + +Wilson score interval +https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval + +reddit’s new comment sorting system +https://redditblog.com/2009/10/15/reddits-new-comment-sorting-system/ + +Markov Chains Explained Visually +http://setosa.io/ev/markov-chains/ + +An algorithmic framework for performing collaborative filtering +https://dl.acm.org/citation.cfm?id=312682 + +Item-based collaborative filtering recommendation algorithms +https://dl.acm.org/citation.cfm?id=372071 + +FunkSVD +http://sifter.org/~simon/journal/20061211.html + +Probabilistic Matrix Factorization +https://papers.nips.cc/paper/3208-probabilistic-matrix-factorization.pdf + +Bayesian Probabilistic Matrix Factorization using Markov Chain Monte Carlo +https://www.cs.toronto.edu/~amnih/papers/bpmf.pdf + +Algorithms for Non-negative Matrix Factorization +https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf + +Learning the parts of objects by non-negative matrix factorization +http://www.columbia.edu/~jwp2128/Teaching/E4903/papers/nmf_nature.pdf + +Restricted Boltzmann Machines for Collaborative Filtering +https://www.cs.toronto.edu/~rsalakhu/papers/rbmcf.pdf + +AutoRec: Autoencoders Meet Collaborative Filtering +http://users.cecs.anu.edu.au/~u5098633/papers/www15.pdf \ No newline at end of file diff --git a/recommenders/itembased.py 
b/recommenders/itembased.py new file mode 100644 index 00000000..f87f9481 --- /dev/null +++ b/recommenders/itembased.py @@ -0,0 +1,171 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle +from datetime import datetime +from sortedcontainers import SortedList + +# load in the data +import os +if not os.path.exists('user2movie.json') or \ + not os.path.exists('movie2user.json') or \ + not os.path.exists('usermovie2rating.json') or \ + not os.path.exists('usermovie2rating_test.json'): + import preprocess2dict + + +with open('user2movie.json', 'rb') as f: + user2movie = pickle.load(f) + +with open('movie2user.json', 'rb') as f: + movie2user = pickle.load(f) + +with open('usermovie2rating.json', 'rb') as f: + usermovie2rating = pickle.load(f) + +with open('usermovie2rating_test.json', 'rb') as f: + usermovie2rating_test = pickle.load(f) + + +N = np.max(list(user2movie.keys())) + 1 +# the test set may contain movies the train set doesn't have data on +m1 = np.max(list(movie2user.keys())) +m2 = np.max([m for (u, m), r in usermovie2rating_test.items()]) +M = max(m1, m2) + 1 +print("N:", N, "M:", M) + +if M > 2000: + print("N =", N, "are you sure you want to continue?") + print("Comment out these lines if so...") + exit() + + +# to find the user similarities, you have to do O(M^2 * N) calculations! +# in the "real-world" you'd want to parallelize this +# note: we really only have to do half the calculations, since w_ij is symmetric +K = 20 # number of neighbors we'd like to consider +limit = 5 # number of common movies users must have in common in order to consider +neighbors = [] # store neighbors in this list +averages = [] # each item's average rating for later use +deviations = [] # each item's deviation for later use + +for i in range(M): + # find the K closest items to item i + users_i = movie2user[i] + users_i_set = set(users_i) + + # calculate avg and deviation + ratings_i = { user:usermovie2rating[(user, i)] for user in users_i } + avg_i = np.mean(list(ratings_i.values())) + dev_i = { user:(rating - avg_i) for user, rating in ratings_i.items() } + dev_i_values = np.array(list(dev_i.values())) + sigma_i = np.sqrt(dev_i_values.dot(dev_i_values)) + + # save these for later use + averages.append(avg_i) + deviations.append(dev_i) + + sl = SortedList() + for j in range(M): + # don't include yourself + if j != i: + users_j = movie2user[j] + users_j_set = set(users_j) + common_users = (users_i_set & users_j_set) # intersection + if len(common_users) > limit: + # calculate avg and deviation + ratings_j = { user:usermovie2rating[(user, j)] for user in users_j } + avg_j = np.mean(list(ratings_j.values())) + dev_j = { user:(rating - avg_j) for user, rating in ratings_j.items() } + dev_j_values = np.array(list(dev_j.values())) + sigma_j = np.sqrt(dev_j_values.dot(dev_j_values)) + + # calculate correlation coefficient + numerator = sum(dev_i[m]*dev_j[m] for m in common_users) + w_ij = numerator / (sigma_i * sigma_j) + + # insert into sorted list and truncate + # negate weight, because list is sorted ascending + # maximum value (1) is "closest" + sl.add((-w_ij, j)) + if len(sl) > K: + del sl[-1] + + # store the neighbors + neighbors.append(sl) + + # print out useful 
things + if i % 1 == 0: + print(i) + + + +# using neighbors, calculate train and test MSE + +def predict(i, u): + # calculate the weighted sum of deviations + numerator = 0 + denominator = 0 + for neg_w, j in neighbors[i]: + # remember, the weight is stored as its negative + # so the negative of the negative weight is the positive weight + try: + numerator += -neg_w * deviations[j][u] + denominator += abs(neg_w) + except KeyError: + # neighbor may not have been rated by the same user + # don't want to do dictionary lookup twice + # so just throw exception + pass + + if denominator == 0: + prediction = averages[i] + else: + prediction = numerator / denominator + averages[i] + prediction = min(5, prediction) + prediction = max(0.5, prediction) # min rating is 0.5 + return prediction + + + +train_predictions = [] +train_targets = [] +for (u, m), target in usermovie2rating.items(): + # calculate the prediction for this movie + prediction = predict(m, u) + + # save the prediction and target + train_predictions.append(prediction) + train_targets.append(target) + +test_predictions = [] +test_targets = [] +# same thing for test set +for (u, m), target in usermovie2rating_test.items(): + # calculate the prediction for this movie + prediction = predict(m, u) + + # save the prediction and target + test_predictions.append(prediction) + test_targets.append(target) + + +# calculate accuracy +def mse(p, t): + p = np.array(p) + t = np.array(t) + return np.mean((p - t)**2) + +print('train mse:', mse(train_predictions, train_targets)) +print('test mse:', mse(test_predictions, test_targets)) + + + diff --git a/recommenders/mf.py b/recommenders/mf.py new file mode 100644 index 00000000..7864c64a --- /dev/null +++ b/recommenders/mf.py @@ -0,0 +1,146 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle +from datetime import datetime + +# load in the data +import os +if not os.path.exists('user2movie.json') or \ + not os.path.exists('movie2user.json') or \ + not os.path.exists('usermovie2rating.json') or \ + not os.path.exists('usermovie2rating_test.json'): + import preprocess2dict + + +with open('user2movie.json', 'rb') as f: + user2movie = pickle.load(f) + +with open('movie2user.json', 'rb') as f: + movie2user = pickle.load(f) + +with open('usermovie2rating.json', 'rb') as f: + usermovie2rating = pickle.load(f) + +with open('usermovie2rating_test.json', 'rb') as f: + usermovie2rating_test = pickle.load(f) + + +N = np.max(list(user2movie.keys())) + 1 +# the test set may contain movies the train set doesn't have data on +m1 = np.max(list(movie2user.keys())) +m2 = np.max([m for (u, m), r in usermovie2rating_test.items()]) +M = max(m1, m2) + 1 +print("N:", N, "M:", M) + + +# initialize variables +K = 10 # latent dimensionality +W = np.random.randn(N, K) +b = np.zeros(N) +U = np.random.randn(M, K) +c = np.zeros(M) +mu = np.mean(list(usermovie2rating.values())) + +# prediction[i,j] = W[i].dot(U[j]) + b[i] + c.T[j] + mu + +def get_loss(d): + # d: (user_id, movie_id) -> rating + N = float(len(d)) + sse = 0 + for k, r in d.items(): + i, j = k + p = W[i].dot(U[j]) + b[i] + c[j] + mu + sse += (p - r)*(p - r) + return sse / N + + +# train the parameters +epochs = 25 +reg = 0.01 # 
regularization penalty +train_losses = [] +test_losses = [] +for epoch in range(epochs): + print("epoch:", epoch) + epoch_start = datetime.now() + # perform updates + + # update W and b + t0 = datetime.now() + for i in range(N): + # for W + matrix = np.eye(K) * reg + vector = np.zeros(K) + + # for b + bi = 0 + for j in user2movie[i]: + r = usermovie2rating[(i,j)] + matrix += np.outer(U[j], U[j]) + vector += (r - b[i] - c[j] - mu)*U[j] + bi += (r - W[i].dot(U[j]) - c[j] - mu) + + # set the updates + W[i] = np.linalg.solve(matrix, vector) + b[i] = bi / ((1 + reg)*len(user2movie[i])) + + if i % (N//10) == 0: + print("i:", i, "N:", N) + print("updated W and b:", datetime.now() - t0) + + # update U and c + t0 = datetime.now() + for j in range(M): + # for U + matrix = np.eye(K) * reg + vector = np.zeros(K) + + # for c + cj = 0 + try: + for i in movie2user[j]: + r = usermovie2rating[(i,j)] + matrix += np.outer(W[i], W[i]) + vector += (r - b[i] - c[j] - mu)*W[i] + cj += (r - W[i].dot(U[j]) - b[i] - mu) + + # set the updates + U[j] = np.linalg.solve(matrix, vector) + c[j] = cj / ((1 + reg)*len(movie2user[j])) + + if j % (M//10) == 0: + print("j:", j, "M:", M) + except KeyError: + # possible not to have any ratings for a movie + pass + print("updated U and c:", datetime.now() - t0) + print("epoch duration:", datetime.now() - epoch_start) + + + # store train loss + t0 = datetime.now() + train_losses.append(get_loss(usermovie2rating)) + + # store test loss + test_losses.append(get_loss(usermovie2rating_test)) + print("calculate cost:", datetime.now() - t0) + print("train loss:", train_losses[-1]) + print("test loss:", test_losses[-1]) + + +print("train losses:", train_losses) +print("test losses:", test_losses) + +# plot losses +plt.plot(train_losses, label="train loss") +plt.plot(test_losses, label="test loss") +plt.legend() +plt.show() diff --git a/recommenders/mf2.py b/recommenders/mf2.py new file mode 100644 index 00000000..72dc5db7 --- /dev/null +++ b/recommenders/mf2.py @@ -0,0 +1,159 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle +from datetime import datetime +from copy import deepcopy + +# load in the data +import os +if not os.path.exists('user2movie.json') or \ + not os.path.exists('movie2user.json') or \ + not os.path.exists('usermovie2rating.json') or \ + not os.path.exists('usermovie2rating_test.json'): + import preprocess2dict + + +with open('user2movie.json', 'rb') as f: + user2movie = pickle.load(f) + +with open('movie2user.json', 'rb') as f: + movie2user = pickle.load(f) + +with open('usermovie2rating.json', 'rb') as f: + usermovie2rating = pickle.load(f) + +with open('usermovie2rating_test.json', 'rb') as f: + usermovie2rating_test = pickle.load(f) + + +N = np.max(list(user2movie.keys())) + 1 +# the test set may contain movies the train set doesn't have data on +m1 = np.max(list(movie2user.keys())) +m2 = np.max([m for (u, m), r in usermovie2rating_test.items()]) +M = max(m1, m2) + 1 +print("N:", N, "M:", M) + + +# convert user2movie and movie2user to include ratings +print("converting...") +user2movierating = {} +for i, movies in user2movie.items(): + r = np.array([usermovie2rating[(i,j)] for j in movies]) + user2movierating[i] = (movies, r) 
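+
+# user2movierating maps user i -> (movie_ids, ratings) as parallel arrays;
+# movie2userrating below does the same per movie, so the alternating
+# least-squares updates further down can be vectorized with numpy instead
+# of looping over individual ratings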
+movie2userrating = {} +for j, users in movie2user.items(): + r = np.array([usermovie2rating[(i,j)] for i in users]) + movie2userrating[j] = (users, r) + +# create a movie2user for test set, since we need it for loss +movie2userrating_test = {} +for (i, j), r in usermovie2rating_test.items(): + if j not in movie2userrating_test: + movie2userrating_test[j] = [[i], [r]] + else: + movie2userrating_test[j][0].append(i) + movie2userrating_test[j][1].append(r) +for j, (users, r) in movie2userrating_test.items(): + movie2userrating_test[j][1] = np.array(r) +print("conversion done") + +# initialize variables +K = 10 # latent dimensionality +W = np.random.randn(N, K) +b = np.zeros(N) +U = np.random.randn(M, K) +c = np.zeros(M) +mu = np.mean(list(usermovie2rating.values())) + + + +def get_loss(m2u): + # d: movie_id -> (user_ids, ratings) + N = 0. + sse = 0 + for j, (u_ids, r) in m2u.items(): + p = W[u_ids].dot(U[j]) + b[u_ids] + c[j] + mu + delta = p - r + sse += delta.dot(delta) + N += len(r) + return sse / N + + + +# train the parameters +epochs = 25 +reg = 0.1 # regularization penalty +train_losses = [] +test_losses = [] +for epoch in range(epochs): + print("epoch:", epoch) + epoch_start = datetime.now() + # perform updates + + # update W and b + t0 = datetime.now() + for i in range(N): + m_ids, r = user2movierating[i] + matrix = U[m_ids].T.dot(U[m_ids]) + np.eye(K) * reg + vector = (r - b[i] - c[m_ids] - mu).dot(U[m_ids]) + bi = (r - U[m_ids].dot(W[i]) - c[m_ids] - mu).sum() + + # set the updates + W[i] = np.linalg.solve(matrix, vector) + b[i] = bi / ((1 + reg)*len(user2movie[i])) + + if i % (N//10) == 0: + print("i:", i, "N:", N) + print("updated W and b:", datetime.now() - t0) + + + # update U and c + t0 = datetime.now() + for j in range(M): + try: + u_ids, r = movie2userrating[j] + matrix = W[u_ids].T.dot(W[u_ids]) + np.eye(K) * reg + vector = (r - b[u_ids] - c[j] - mu).dot(W[u_ids]) + cj = (r - W[u_ids].dot(U[j]) - b[u_ids] - mu).sum() + + # set the updates + U[j] = np.linalg.solve(matrix, vector) + c[j] = cj / ((1 + reg)*len(movie2user[j])) + + if j % (M//10) == 0: + print("j:", j, "M:", M) + except KeyError: + # possible not to have any ratings for a movie + pass + print("updated U and c:", datetime.now() - t0) + print("epoch duration:", datetime.now() - epoch_start) + + + # store train loss + t0 = datetime.now() + train_losses.append(get_loss(movie2userrating)) + + # store test loss + test_losses.append(get_loss(movie2userrating_test)) + print("calculate cost:", datetime.now() - t0) + print("train loss:", train_losses[-1]) + print("test loss:", test_losses[-1]) + + +print("train losses:", train_losses) +print("test losses:", test_losses) + +# plot losses +plt.plot(train_losses, label="train loss") +plt.plot(test_losses, label="test loss") +plt.legend() +plt.show() diff --git a/recommenders/mf_keras.py b/recommenders/mf_keras.py new file mode 100644 index 00000000..efc3315b --- /dev/null +++ b/recommenders/mf_keras.py @@ -0,0 +1,100 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle + +from keras.models import Model +from keras.layers import Input, Embedding, Dot, Add, Flatten +from keras.regularizers import l2 +from keras.optimizers import SGD, Adam + 
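+# matrix factorization expressed as a Keras graph: user and movie Embeddings
+# hold the latent factors, Dot takes their inner product, size-1 Embeddings
+# act as per-user and per-movie biases, and the global mean mu is handled by
+# subtracting it from the rating targets below
+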
+# load in the data +df = pd.read_csv('../large_files/movielens-20m-dataset/edited_rating.csv') + +N = df.userId.max() + 1 # number of users +M = df.movie_idx.max() + 1 # number of movies + +# split into train and test +df = shuffle(df) +cutoff = int(0.8*len(df)) +df_train = df.iloc[:cutoff] +df_test = df.iloc[cutoff:] + +# initialize variables +K = 10 # latent dimensionality +mu = df_train.rating.mean() +epochs = 15 +reg = 0. # regularization penalty + + +# keras model +u = Input(shape=(1,)) +m = Input(shape=(1,)) +u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg))(u) # (N, 1, K) +m_embedding = Embedding(M, K, embeddings_regularizer=l2(reg))(m) # (N, 1, K) + +# subsubmodel = Model([u, m], [u_embedding, m_embedding]) +# user_ids = df_train.userId.values[0:5] +# movie_ids = df_train.movie_idx.values[0:5] +# print("user_ids.shape", user_ids.shape) +# p = subsubmodel.predict([user_ids, movie_ids]) +# print("p[0].shape:", p[0].shape) +# print("p[1].shape:", p[1].shape) +# exit() + + +u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg))(u) # (N, 1, 1) +m_bias = Embedding(M, 1, embeddings_regularizer=l2(reg))(m) # (N, 1, 1) +x = Dot(axes=2)([u_embedding, m_embedding]) # (N, 1, 1) + +# submodel = Model([u, m], x) +# user_ids = df_train.userId.values[0:5] +# movie_ids = df_train.movie_idx.values[0:5] +# p = submodel.predict([user_ids, movie_ids]) +# print("p.shape:", p.shape) +# exit() + + +x = Add()([x, u_bias, m_bias]) +x = Flatten()(x) # (N, 1) + +model = Model(inputs=[u, m], outputs=x) +model.compile( + loss='mse', + # optimizer='adam', + # optimizer=Adam(lr=0.01), + optimizer=SGD(lr=0.08, momentum=0.9), + metrics=['mse'], +) + +r = model.fit( + x=[df_train.userId.values, df_train.movie_idx.values], + y=df_train.rating.values - mu, + epochs=epochs, + batch_size=128, + validation_data=( + [df_test.userId.values, df_test.movie_idx.values], + df_test.rating.values - mu + ) +) + + +# plot losses +plt.plot(r.history['loss'], label="train loss") +plt.plot(r.history['val_loss'], label="test loss") +plt.legend() +plt.show() + +# plot mse +plt.plot(r.history['mean_squared_error'], label="train mse") +plt.plot(r.history['val_mean_squared_error'], label="test mse") +plt.legend() +plt.show() diff --git a/recommenders/mf_keras_deep.py b/recommenders/mf_keras_deep.py new file mode 100644 index 00000000..f3888a7a --- /dev/null +++ b/recommenders/mf_keras_deep.py @@ -0,0 +1,89 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle + +from keras.models import Model +from keras.layers import Input, Embedding, Flatten, Dense, Concatenate +from keras.layers import Dropout, BatchNormalization, Activation +from keras.regularizers import l2 +from keras.optimizers import SGD, Adam + +# load in the data +df = pd.read_csv('../large_files/movielens-20m-dataset/edited_rating.csv') + +N = df.userId.max() + 1 # number of users +M = df.movie_idx.max() + 1 # number of movies + +# split into train and test +df = shuffle(df) +cutoff = int(0.8*len(df)) +df_train = df.iloc[:cutoff] +df_test = df.iloc[cutoff:] + +# initialize variables +K = 10 # latent dimensionality +mu = df_train.rating.mean() +epochs = 15 +# reg = 0.0001 # regularization penalty + + +# keras model +u = 
Input(shape=(1,)) +m = Input(shape=(1,)) +u_embedding = Embedding(N, K)(u) # (N, 1, K) +m_embedding = Embedding(M, K)(m) # (N, 1, K) +u_embedding = Flatten()(u_embedding) # (N, K) +m_embedding = Flatten()(m_embedding) # (N, K) +x = Concatenate()([u_embedding, m_embedding]) # (N, 2K) + +# the neural network +x = Dense(400)(x) +# x = BatchNormalization()(x) +x = Activation('relu')(x) +# x = Dropout(0.5)(x) +# x = Dense(100)(x) +# x = BatchNormalization()(x) +# x = Activation('relu')(x) +x = Dense(1)(x) + +model = Model(inputs=[u, m], outputs=x) +model.compile( + loss='mse', + # optimizer='adam', + # optimizer=Adam(lr=0.01), + optimizer=SGD(lr=0.08, momentum=0.9), + metrics=['mse'], +) + +r = model.fit( + x=[df_train.userId.values, df_train.movie_idx.values], + y=df_train.rating.values - mu, + epochs=epochs, + batch_size=128, + validation_data=( + [df_test.userId.values, df_test.movie_idx.values], + df_test.rating.values - mu + ) +) + + +# plot losses +plt.plot(r.history['loss'], label="train loss") +plt.plot(r.history['val_loss'], label="test loss") +plt.legend() +plt.show() + +# plot mse +plt.plot(r.history['mean_squared_error'], label="train mse") +plt.plot(r.history['val_mean_squared_error'], label="test mse") +plt.legend() +plt.show() diff --git a/recommenders/mf_keras_res.py b/recommenders/mf_keras_res.py new file mode 100644 index 00000000..b362eded --- /dev/null +++ b/recommenders/mf_keras_res.py @@ -0,0 +1,98 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle + +from keras.models import Model +from keras.layers import Input, Embedding, Dot, Add, Flatten, Dense, Concatenate +from keras.layers import Dropout, BatchNormalization, Activation +from keras.regularizers import l2 +from keras.optimizers import SGD, Adam + +# load in the data +df = pd.read_csv('../large_files/movielens-20m-dataset/edited_rating.csv') + +N = df.userId.max() + 1 # number of users +M = df.movie_idx.max() + 1 # number of movies + +# split into train and test +df = shuffle(df) +cutoff = int(0.8*len(df)) +df_train = df.iloc[:cutoff] +df_test = df.iloc[cutoff:] + +# initialize variables +K = 10 # latent dimensionality +mu = df_train.rating.mean() +epochs = 15 +reg = 0. 
# regularization penalty + + +# keras model +u = Input(shape=(1,)) +m = Input(shape=(1,)) +u_embedding = Embedding(N, K)(u) # (N, 1, K) +m_embedding = Embedding(M, K)(m) # (N, 1, K) + + +##### main branch +u_bias = Embedding(N, 1)(u) # (N, 1, 1) +m_bias = Embedding(M, 1)(m) # (N, 1, 1) +x = Dot(axes=2)([u_embedding, m_embedding]) # (N, 1, 1) +x = Add()([x, u_bias, m_bias]) +x = Flatten()(x) # (N, 1) + + +##### side branch +u_embedding = Flatten()(u_embedding) # (N, K) +m_embedding = Flatten()(m_embedding) # (N, K) +y = Concatenate()([u_embedding, m_embedding]) # (N, 2K) +y = Dense(400)(y) +y = Activation('elu')(y) +# y = Dropout(0.5)(y) +y = Dense(1)(y) + + +##### merge +x = Add()([x, y]) + +model = Model(inputs=[u, m], outputs=x) +model.compile( + loss='mse', + # optimizer='adam', + # optimizer=Adam(lr=0.01), + optimizer=SGD(lr=0.08, momentum=0.9), + metrics=['mse'], +) + +r = model.fit( + x=[df_train.userId.values, df_train.movie_idx.values], + y=df_train.rating.values - mu, + epochs=epochs, + batch_size=128, + validation_data=( + [df_test.userId.values, df_test.movie_idx.values], + df_test.rating.values - mu + ) +) + + +# plot losses +plt.plot(r.history['loss'], label="train loss") +plt.plot(r.history['val_loss'], label="test loss") +plt.legend() +plt.show() + +# plot mse +plt.plot(r.history['mean_squared_error'], label="train mse") +plt.plot(r.history['val_mean_squared_error'], label="test mse") +plt.legend() +plt.show() \ No newline at end of file diff --git a/recommenders/preprocess.py b/recommenders/preprocess.py new file mode 100644 index 00000000..72585460 --- /dev/null +++ b/recommenders/preprocess.py @@ -0,0 +1,41 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pandas as pd + +# https://www.kaggle.com/grouplens/movielens-20m-dataset +df = pd.read_csv('../large_files/movielens-20m-dataset/rating.csv') + + + +# note: +# user ids are ordered sequentially from 1..138493 +# with no missing numbers +# movie ids are integers from 1..131262 +# NOT all movie ids appear +# there are only 26744 movie ids +# write code to check it yourself! 
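+#
+# a quick (commented-out) way to check, assuming df is the dataframe loaded
+# earlier; the expected values follow from the notes above:
+# print(df.userId.min(), df.userId.max(), df.userId.nunique())    # 1 138493 138493
+# print(df.movieId.min(), df.movieId.max(), df.movieId.nunique()) # 1 131262 26744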
+ + +# make the user ids go from 0...N-1 +df.userId = df.userId - 1 + +# create a mapping for movie ids +unique_movie_ids = set(df.movieId.values) +movie2idx = {} +count = 0 +for movie_id in unique_movie_ids: + movie2idx[movie_id] = count + count += 1 + +# add them to the data frame +# takes awhile +df['movie_idx'] = df.apply(lambda row: movie2idx[row.movieId], axis=1) + +df = df.drop(columns=['timestamp']) + +df.to_csv('../large_files/movielens-20m-dataset/edited_rating.csv', index=False) \ No newline at end of file diff --git a/recommenders/preprocess2dict.py b/recommenders/preprocess2dict.py new file mode 100644 index 00000000..2ed5d8b7 --- /dev/null +++ b/recommenders/preprocess2dict.py @@ -0,0 +1,82 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle + +# load in the data +# https://www.kaggle.com/grouplens/movielens-20m-dataset +df = pd.read_csv('../large_files/movielens-20m-dataset/very_small_rating.csv') + +N = df.userId.max() + 1 # number of users +M = df.movie_idx.max() + 1 # number of movies + +# split into train and test +df = shuffle(df) +cutoff = int(0.8*len(df)) +df_train = df.iloc[:cutoff] +df_test = df.iloc[cutoff:] + +# a dictionary to tell us which users have rated which movies +user2movie = {} +# a dicationary to tell us which movies have been rated by which users +movie2user = {} +# a dictionary to look up ratings +usermovie2rating = {} +print("Calling: update_user2movie_and_movie2user") +count = 0 +def update_user2movie_and_movie2user(row): + global count + count += 1 + if count % 100000 == 0: + print("processed: %.3f" % (float(count)/cutoff)) + + i = int(row.userId) + j = int(row.movie_idx) + if i not in user2movie: + user2movie[i] = [j] + else: + user2movie[i].append(j) + + if j not in movie2user: + movie2user[j] = [i] + else: + movie2user[j].append(i) + + usermovie2rating[(i,j)] = row.rating +df_train.apply(update_user2movie_and_movie2user, axis=1) + +# test ratings dictionary +usermovie2rating_test = {} +print("Calling: update_usermovie2rating_test") +count = 0 +def update_usermovie2rating_test(row): + global count + count += 1 + if count % 100000 == 0: + print("processed: %.3f" % (float(count)/len(df_test))) + + i = int(row.userId) + j = int(row.movie_idx) + usermovie2rating_test[(i,j)] = row.rating +df_test.apply(update_usermovie2rating_test, axis=1) + +# note: these are not really JSONs +with open('user2movie.json', 'wb') as f: + pickle.dump(user2movie, f) + +with open('movie2user.json', 'wb') as f: + pickle.dump(movie2user, f) + +with open('usermovie2rating.json', 'wb') as f: + pickle.dump(usermovie2rating, f) + +with open('usermovie2rating_test.json', 'wb') as f: + pickle.dump(usermovie2rating_test, f) diff --git a/recommenders/preprocess2sparse.py b/recommenders/preprocess2sparse.py new file mode 100644 index 00000000..864de56d --- /dev/null +++ b/recommenders/preprocess2sparse.py @@ -0,0 +1,62 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import 
matplotlib.pyplot as plt +from sklearn.utils import shuffle +from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz + +# load in the data +df = pd.read_csv('../large_files/movielens-20m-dataset/edited_rating.csv') +# df = pd.read_csv('../large_files/movielens-20m-dataset/small_rating.csv') + +N = df.userId.max() + 1 # number of users +M = df.movie_idx.max() + 1 # number of movies + +# split into train and test +df = shuffle(df) +cutoff = int(0.8*len(df)) +df_train = df.iloc[:cutoff] +df_test = df.iloc[cutoff:] + +A = lil_matrix((N, M)) +print("Calling: update_train") +count = 0 +def update_train(row): + global count + count += 1 + if count % 100000 == 0: + print("processed: %.3f" % (float(count)/cutoff)) + + i = int(row.userId) + j = int(row.movie_idx) + A[i,j] = row.rating +df_train.apply(update_train, axis=1) + +# mask, to tell us which entries exist and which do not +A = A.tocsr() +mask = (A > 0) +save_npz("Atrain.npz", A) + +# test ratings dictionary +A_test = lil_matrix((N, M)) +print("Calling: update_test") +count = 0 +def update_test(row): + global count + count += 1 + if count % 100000 == 0: + print("processed: %.3f" % (float(count)/len(df_test))) + + i = int(row.userId) + j = int(row.movie_idx) + A_test[i,j] = row.rating +df_test.apply(update_test, axis=1) +A_test = A_test.tocsr() +mask_test = (A_test > 0) +save_npz("Atest.npz", A_test) diff --git a/recommenders/preprocess_shrink.py b/recommenders/preprocess_shrink.py new file mode 100644 index 00000000..665a80e6 --- /dev/null +++ b/recommenders/preprocess_shrink.py @@ -0,0 +1,58 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +from collections import Counter + +# load in the data +# https://www.kaggle.com/grouplens/movielens-20m-dataset +df = pd.read_csv('../large_files/movielens-20m-dataset/edited_rating.csv') +print("original dataframe size:", len(df)) + +N = df.userId.max() + 1 # number of users +M = df.movie_idx.max() + 1 # number of movies + +user_ids_count = Counter(df.userId) +movie_ids_count = Counter(df.movie_idx) + +# number of users and movies we would like to keep +n = 10000 +m = 2000 + +user_ids = [u for u, c in user_ids_count.most_common(n)] +movie_ids = [m for m, c in movie_ids_count.most_common(m)] + +# make a copy, otherwise ids won't be overwritten +df_small = df[df.userId.isin(user_ids) & df.movie_idx.isin(movie_ids)].copy() + +# need to remake user ids and movie ids since they are no longer sequential +new_user_id_map = {} +i = 0 +for old in user_ids: + new_user_id_map[old] = i + i += 1 +print("i:", i) + +new_movie_id_map = {} +j = 0 +for old in movie_ids: + new_movie_id_map[old] = j + j += 1 +print("j:", j) + +print("Setting new ids") +df_small.loc[:, 'userId'] = df_small.apply(lambda row: new_user_id_map[row.userId], axis=1) +df_small.loc[:, 'movie_idx'] = df_small.apply(lambda row: new_movie_id_map[row.movie_idx], axis=1) +# df_small.drop(columns=['userId', 'movie_idx']) +# df_small.rename(index=str, columns={'new_userId': 'userId', 'new_movie_idx': 'movie_idx'}) +print("max user id:", df_small.userId.max()) +print("max movie id:", df_small.movie_idx.max()) + +print("small dataframe size:", len(df_small)) +df_small.to_csv('../large_files/movielens-20m-dataset/small_rating.csv', index=False) diff --git 
a/recommenders/userbased.py b/recommenders/userbased.py new file mode 100644 index 00000000..b512a722 --- /dev/null +++ b/recommenders/userbased.py @@ -0,0 +1,168 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import pickle +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.utils import shuffle +from datetime import datetime +from sortedcontainers import SortedList + +# load in the data +import os +if not os.path.exists('user2movie.json') or \ + not os.path.exists('movie2user.json') or \ + not os.path.exists('usermovie2rating.json') or \ + not os.path.exists('usermovie2rating_test.json'): + import preprocess2dict + + +with open('user2movie.json', 'rb') as f: + user2movie = pickle.load(f) + +with open('movie2user.json', 'rb') as f: + movie2user = pickle.load(f) + +with open('usermovie2rating.json', 'rb') as f: + usermovie2rating = pickle.load(f) + +with open('usermovie2rating_test.json', 'rb') as f: + usermovie2rating_test = pickle.load(f) + + +N = np.max(list(user2movie.keys())) + 1 +# the test set may contain movies the train set doesn't have data on +m1 = np.max(list(movie2user.keys())) +m2 = np.max([m for (u, m), r in usermovie2rating_test.items()]) +M = max(m1, m2) + 1 +print("N:", N, "M:", M) + +if N > 10000: + print("N =", N, "are you sure you want to continue?") + print("Comment out these lines if so...") + exit() + + +# to find the user similarities, you have to do O(N^2 * M) calculations! +# in the "real-world" you'd want to parallelize this +# note: we really only have to do half the calculations, since w_ij is symmetric +K = 25 # number of neighbors we'd like to consider +limit = 5 # number of common movies users must have in common in order to consider +neighbors = [] # store neighbors in this list +averages = [] # each user's average rating for later use +deviations = [] # each user's deviation for later use +for i in range(N): + # find the 25 closest users to user i + movies_i = user2movie[i] + movies_i_set = set(movies_i) + + # calculate avg and deviation + ratings_i = { movie:usermovie2rating[(i, movie)] for movie in movies_i } + avg_i = np.mean(list(ratings_i.values())) + dev_i = { movie:(rating - avg_i) for movie, rating in ratings_i.items() } + dev_i_values = np.array(list(dev_i.values())) + sigma_i = np.sqrt(dev_i_values.dot(dev_i_values)) + + # save these for later use + averages.append(avg_i) + deviations.append(dev_i) + + sl = SortedList() + for j in range(N): + # don't include yourself + if j != i: + movies_j = user2movie[j] + movies_j_set = set(movies_j) + common_movies = (movies_i_set & movies_j_set) # intersection + if len(common_movies) > limit: + # calculate avg and deviation + ratings_j = { movie:usermovie2rating[(j, movie)] for movie in movies_j } + avg_j = np.mean(list(ratings_j.values())) + dev_j = { movie:(rating - avg_j) for movie, rating in ratings_j.items() } + dev_j_values = np.array(list(dev_j.values())) + sigma_j = np.sqrt(dev_j_values.dot(dev_j_values)) + + # calculate correlation coefficient + numerator = sum(dev_i[m]*dev_j[m] for m in common_movies) + w_ij = numerator / (sigma_i * sigma_j) + + # insert into sorted list and truncate + # negate weight, because list is sorted ascending + # maximum value (1) is "closest" + sl.add((-w_ij, j)) + if len(sl) > K: + del sl[-1] + + # store the 
neighbors + neighbors.append(sl) + + # print out useful things + if i % 1 == 0: + print(i) + + +# using neighbors, calculate train and test MSE + +def predict(i, m): + # calculate the weighted sum of deviations + numerator = 0 + denominator = 0 + for neg_w, j in neighbors[i]: + # remember, the weight is stored as its negative + # so the negative of the negative weight is the positive weight + try: + numerator += -neg_w * deviations[j][m] + denominator += abs(neg_w) + except KeyError: + # neighbor may not have rated the same movie + # don't want to do dictionary lookup twice + # so just throw exception + pass + + if denominator == 0: + prediction = averages[i] + else: + prediction = numerator / denominator + averages[i] + prediction = min(5, prediction) + prediction = max(0.5, prediction) # min rating is 0.5 + return prediction + + +train_predictions = [] +train_targets = [] +for (i, m), target in usermovie2rating.items(): + # calculate the prediction for this movie + prediction = predict(i, m) + + # save the prediction and target + train_predictions.append(prediction) + train_targets.append(target) + +test_predictions = [] +test_targets = [] +# same thing for test set +for (i, m), target in usermovie2rating_test.items(): + # calculate the prediction for this movie + prediction = predict(i, m) + + # save the prediction and target + test_predictions.append(prediction) + test_targets.append(target) + + +# calculate accuracy +def mse(p, t): + p = np.array(p) + t = np.array(t) + return np.mean((p - t)**2) + +print('train mse:', mse(train_predictions, train_targets)) +print('test mse:', mse(test_predictions, test_targets)) + + + From 05d2580a3d91b8a77572d9ffc50f2c62027b0a88 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 12 Sep 2018 15:27:35 -0400 Subject: [PATCH 086/329] spark --- recommenders/spark.py | 53 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 recommenders/spark.py diff --git a/recommenders/spark.py b/recommenders/spark.py new file mode 100644 index 00000000..c6e71bd0 --- /dev/null +++ b/recommenders/spark.py @@ -0,0 +1,53 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems + +### meant to be pasted into console ### + +# notes: +# you may have trouble with full dataset on just your local machine +# if you want to know what's in an RDD, use .take(n), ex: +# tmp = p.take(5) +# print(tmp) + +from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating + +# load in the data +data = sc.textFile("/Users/macuser/Code/machine_learning_examples/large_files/movielens-20m-dataset/small_rating.csv") + +# filter out header +header = data.first() #extract header +data = data.filter(lambda row: row != header) + +# convert into a sequence of Rating objects +ratings = data.map( + lambda l: l.split(',') +).map( + lambda l: Rating(int(l[0]), int(l[1]), float(l[2])) +) + +# split into train and test +train, test = ratings.randomSplit([0.8, 0.2]) + +# train the model +K = 10 +epochs = 10 +model = ALS.train(train, K, epochs) + +# evaluate the model + +# train +x = train.map(lambda p: (p[0], p[1])) +p = model.predictAll(x).map(lambda r: ((r[0], r[1]), r[2])) +ratesAndPreds = train.map(lambda r: ((r[0], r[1]), r[2])).join(p) +# joins on first item: (user_id, movie_id) +# each row of result is: ((user_id, movie_id), (rating, prediction)) +mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() +print("train mse:", mse) + + +# test +x = test.map(lambda p: (p[0], p[1])) +p = 
model.predictAll(x).map(lambda r: ((r[0], r[1]), r[2])) +ratesAndPreds = test.map(lambda r: ((r[0], r[1]), r[2])).join(p) +mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() +print("test mse:", mse) \ No newline at end of file From f6f97af5e368bb1343243049d44f0a12635cfa38 Mon Sep 17 00:00:00 2001 From: Lazy Programmer Date: Wed, 12 Sep 2018 16:56:19 -0400 Subject: [PATCH 087/329] update --- recommenders/spark.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/recommenders/spark.py b/recommenders/spark.py index c6e71bd0..a03b2fb5 100644 --- a/recommenders/spark.py +++ b/recommenders/spark.py @@ -10,9 +10,10 @@ # print(tmp) from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating +import os # load in the data -data = sc.textFile("/Users/macuser/Code/machine_learning_examples/large_files/movielens-20m-dataset/small_rating.csv") +data = sc.textFile(os.path.expanduser('~') + "/Code/machine_learning_examples/large_files/movielens-20m-dataset/small_rating.csv") # filter out header header = data.first() #extract header @@ -42,7 +43,7 @@ # joins on first item: (user_id, movie_id) # each row of result is: ((user_id, movie_id), (rating, prediction)) mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() -print("train mse:", mse) +print("train mse: %s" % mse) # test @@ -50,4 +51,4 @@ p = model.predictAll(x).map(lambda r: ((r[0], r[1]), r[2])) ratesAndPreds = test.map(lambda r: ((r[0], r[1]), r[2])).join(p) mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() -print("test mse:", mse) \ No newline at end of file +print("test mse: %s" % mse) \ No newline at end of file From 9cc8ec3e382083171ed9f109453efc4a35182a29 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 14 Sep 2018 04:05:32 -0400 Subject: [PATCH 088/329] update --- recommenders/spark.py | 2 +- recommenders/spark2.py | 60 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 recommenders/spark2.py diff --git a/recommenders/spark.py b/recommenders/spark.py index c6e71bd0..6b85005f 100644 --- a/recommenders/spark.py +++ b/recommenders/spark.py @@ -12,7 +12,7 @@ from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating # load in the data -data = sc.textFile("/Users/macuser/Code/machine_learning_examples/large_files/movielens-20m-dataset/small_rating.csv") +data = sc.textFile("../large_files/movielens-20m-dataset/small_rating.csv") # filter out header header = data.first() #extract header diff --git a/recommenders/spark2.py b/recommenders/spark2.py new file mode 100644 index 00000000..5879269d --- /dev/null +++ b/recommenders/spark2.py @@ -0,0 +1,60 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems + +# notes: +# you may have trouble with full dataset on just your local machine +# if you want to know what's in an RDD, use .take(n), ex: +# tmp = p.take(5) +# print(tmp) + +from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating +from pyspark import SparkContext + +# increase memory +# SparkContext.setSystemProperty('spark.driver.memory', '10g') +# SparkContext.setSystemProperty('spark.executor.memory', '10g') + +sc = SparkContext("local", "Your App Name Here") + + +# load in the data +# data = sc.textFile("../large_files/movielens-20m-dataset/small_rating.csv") +data = sc.textFile("../large_files/movielens-20m-dataset/rating.csv.gz") + +# filter out header +header = data.first() #extract header +data = data.filter(lambda row: 
row != header) + +# convert into a sequence of Rating objects +ratings = data.map( + lambda l: l.split(',') +).map( + lambda l: Rating(int(l[0]), int(l[1]), float(l[2])) +) + +# split into train and test +train, test = ratings.randomSplit([0.8, 0.2]) + +# train the model +K = 10 +epochs = 10 +model = ALS.train(train, K, epochs) + +# evaluate the model + +# train +x = train.map(lambda p: (p[0], p[1])) +p = model.predictAll(x).map(lambda r: ((r[0], r[1]), r[2])) +ratesAndPreds = train.map(lambda r: ((r[0], r[1]), r[2])).join(p) +# joins on first item: (user_id, movie_id) +# each row of result is: ((user_id, movie_id), (rating, prediction)) +mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() +print("***** train mse: %s *****" % mse) + + +# test +x = test.map(lambda p: (p[0], p[1])) +p = model.predictAll(x).map(lambda r: ((r[0], r[1]), r[2])) +ratesAndPreds = test.map(lambda r: ((r[0], r[1]), r[2])).join(p) +mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() +print("***** test mse: %s *****" % mse) \ No newline at end of file From d3d1e486c820c718dcad2bafb3cd483dc4bb4274 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 16 Sep 2018 02:30:23 -0400 Subject: [PATCH 089/329] rbm --- recommenders/rbm_tf_k.py | 245 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 245 insertions(+) create mode 100644 recommenders/rbm_tf_k.py diff --git a/recommenders/rbm_tf_k.py b/recommenders/rbm_tf_k.py new file mode 100644 index 00000000..14b7af6f --- /dev/null +++ b/recommenders/rbm_tf_k.py @@ -0,0 +1,245 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import tensorflow as tf +import matplotlib.pyplot as plt +from sklearn.utils import shuffle + +import pandas as pd +from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz +from datetime import datetime + + +# is it possible to one-hot encode the data prior to feeding it +# into the neural network, so that we don't have to do it on the fly? 
+# yes, but: +# 1) scipy sparse doesn't support N-D matrices +# 2) you can use the 'sparse' standalone package, but it takes very long +# and you will run out of RAM + + +def one_hot_encode(X, K): + # input is N x D + # output is N x D x K + N, D = X.shape + Y = np.zeros((N, D, K)) + for n, d in zip(*X.nonzero()): + # 0.5...5 --> 1..10 --> 0..9 + k = int(X[n,d]*2 - 1) + Y[n,d,k] = 1 + return Y + +def one_hot_mask(X, K): + # input is N x D + # output is N x D x K + N, D = X.shape + Y = np.zeros((N, D, K)) + # if X[n,d] == 0, there's a missing rating + # so the mask should be all zeros + # else, it should be all ones + for n, d in zip(*X.nonzero()): + Y[n,d,:] = 1 + return Y + +one_to_ten = np.arange(K) + 1 # [1, 2, 3, ..., 10] +def convert_probs_to_ratings(probs): + # probs is N x D x K + # output is N x D matrix of predicted ratings + # N, D, K = probs.shape + # out = np.zeros((N, D)) + # each predicted rating is a weighted average using the probabilities + # for n in range(N): + # for d in range(D): + # out[n,d] = probs[n,d].dot(one_to_ten) / 2 + # return out + return probs.dot(one_to_ten) / 2 + + + +def dot1(V, W): + # V is N x D x K (batch of visible units) + # W is D x K x M (weights) + # returns N x M (hidden layer size) + return tf.tensordot(V, W, axes=[[1,2], [0,1]]) + +def dot2(H, W): + # H is N x M (batch of hiddens) + # W is D x K x M (weights transposed) + # returns N x D x K (visible) + return tf.tensordot(H, W, axes=[[1], [2]]) + + +class RBM(object): + def __init__(self, D, M, K): + self.D = D # input feature size + self.M = M # hidden size + self.K = K # number of ratings + self.build(D, M, K) + + + def build(self, D, M, K): + # params + self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) + self.c = tf.Variable(np.zeros(M).astype(np.float32)) + self.b = tf.Variable(np.zeros((D, K)).astype(np.float32)) + + # data + self.X_in = tf.placeholder(tf.float32, shape=(None, D, K)) + self.mask = tf.placeholder(tf.float32, shape=(None, D, K)) + + # conditional probabilities + # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2 + V = self.X_in + p_h_given_v = tf.nn.sigmoid(dot1(V, self.W) + self.c) + self.p_h_given_v = p_h_given_v # save for later + + # draw a sample from p(h | v) + r = tf.random_uniform(shape=tf.shape(p_h_given_v)) + H = tf.to_float(r < p_h_given_v) + + # draw a sample from p(v | h) + # note: we don't have to actually do the softmax + logits = dot2(H, self.W) + self.b + cdist = tf.distributions.Categorical(logits=logits) + X_sample = cdist.sample() # shape is (N, D) + X_sample = tf.one_hot(X_sample, depth=self.K) # turn it into (N, D, K) + X_sample = X_sample * self.mask # missing ratings shouldn't contribute to objective + + + # build the objective + objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample)) + self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) + + # build the cost + # we won't use this to optimize the model parameters + # just to observe what happens during training + logits = self.forward_logits(self.X_in) + self.cost = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits( + labels=self.X_in, + logits=logits, + ) + ) + + # to get the output + self.output_visible = self.forward_output(self.X_in) + + initop = tf.global_variables_initializer() + self.session = tf.Session() + self.session.run(initop) + + def fit(self, X, mask, X_test, mask_test, epochs=10, 
batch_sz=256, show_fig=True): + N, D = X.shape + n_batches = N // batch_sz + + + costs = [] + test_costs = [] + for i in range(epochs): + t0 = datetime.now() + print("epoch:", i) + X, mask, X_test, mask_test = shuffle(X, mask, X_test, mask_test) # everything has to be shuffled accordingly + for j in range(n_batches): + x = X[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + m = mask[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + + # both visible units and mask have to be in one-hot form + # N x D --> N x D x K + batch_one_hot = one_hot_encode(x, self.K) + m = one_hot_mask(m, self.K) + + _, c = self.session.run( + (self.train_op, self.cost), + feed_dict={self.X_in: batch_one_hot, self.mask: m} + ) + + if j % 100 == 0: + print("j / n_batches:", j, "/", n_batches, "cost:", c) + print("duration:", datetime.now() - t0) + + # calculate the true train and test cost + t0 = datetime.now() + sse = 0 + test_sse = 0 + n = 0 + test_n = 0 + for j in range(n_batches): + x = X[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + m = mask[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + + # only visible input has to be in one-hot form + xoh = one_hot_encode(x, self.K) + + probs = self.get_visible(xoh) + xhat = convert_probs_to_ratings(probs) + sse += (m * (xhat - x)*(xhat - x)).sum() + n += m.sum() + + # the test PREDICTIONS come from the train data! + # X_test and mask_test are only used for targets + xt = X_test[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + mt = mask_test[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + + test_sse += (mt * (xhat - xt) * (xhat - xt)).sum() + test_n += mt.sum() + c = sse/n + ct = test_sse/test_n + print("train mse:", c) + print("test mse:", ct) + print("calculate cost duration:", datetime.now() - t0) + costs.append(c) + test_costs.append(ct) + if show_fig: + plt.plot(costs, label='train mse') + plt.plot(test_costs, label='test mse') + plt.legend() + plt.show() + + def free_energy(self, V): + first_term = -tf.reduce_sum(dot1(V, self.b)) + second_term = -tf.reduce_sum( + # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), + tf.nn.softplus(dot1(V, self.W) + self.c), + axis=1 + ) + return first_term + second_term + + def forward_hidden(self, X): + return tf.nn.sigmoid(dot1(X, self.W) + self.c) + + def forward_logits(self, X): + Z = self.forward_hidden(X) + return dot2(Z, self.W) + self.b + + def forward_output(self, X): + return tf.nn.softmax(self.forward_logits(X)) + + def transform(self, X): + # accepts and returns a real numpy array + # unlike forward_hidden and forward_output + # which deal with tensorflow variables + return self.session.run(self.p_h_given_v, feed_dict={self.X_in: X}) + + def get_visible(self, X): + return self.session.run(self.output_visible, feed_dict={self.X_in: X}) + + +def main(): + A = load_npz("Atrain.npz") + A_test = load_npz("Atest.npz") + mask = (A > 0) * 1.0 + mask_test = (A_test > 0) * 1.0 + + N, M = A.shape + rbm = RBM(M, 50, 10) + rbm.fit(A, mask, A_test, mask_test) + + +if __name__ == '__main__': + main() From 453bb36e87c8c714fb050e58361746a616f877a8 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 16 Sep 2018 02:34:22 -0400 Subject: [PATCH 090/329] oops --- recommenders/spark.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/recommenders/spark.py b/recommenders/spark.py index b5458100..23ea365a 100644 --- a/recommenders/spark.py +++ b/recommenders/spark.py @@ -13,11 +13,7 @@ import os # load in the data -<<<<<<< HEAD data = sc.textFile("../large_files/movielens-20m-dataset/small_rating.csv") -======= -data = 
sc.textFile(os.path.expanduser('~') + "/Code/machine_learning_examples/large_files/movielens-20m-dataset/small_rating.csv") ->>>>>>> f6f97af5e368bb1343243049d44f0a12635cfa38 # filter out header header = data.first() #extract header From 03f8867b6ba8fa2d314382b148e2d82e7bb53e54 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 16 Sep 2018 02:41:11 -0400 Subject: [PATCH 091/329] oops --- recommenders/rbm_tf_k.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recommenders/rbm_tf_k.py b/recommenders/rbm_tf_k.py index 14b7af6f..4c2f6f05 100644 --- a/recommenders/rbm_tf_k.py +++ b/recommenders/rbm_tf_k.py @@ -46,7 +46,7 @@ def one_hot_mask(X, K): Y[n,d,:] = 1 return Y -one_to_ten = np.arange(K) + 1 # [1, 2, 3, ..., 10] +one_to_ten = np.arange(10) + 1 # [1, 2, 3, ..., 10] def convert_probs_to_ratings(probs): # probs is N x D x K # output is N x D matrix of predicted ratings From f9687b70db9659ade3196d3e0fba5190fcf89344 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 18 Sep 2018 02:27:06 -0400 Subject: [PATCH 092/329] faster rbm --- recommenders/rbm_tf_k_faster.py | 218 ++++++++++++++++++++++++++++++++ 1 file changed, 218 insertions(+) create mode 100644 recommenders/rbm_tf_k_faster.py diff --git a/recommenders/rbm_tf_k_faster.py b/recommenders/rbm_tf_k_faster.py new file mode 100644 index 00000000..df812ba6 --- /dev/null +++ b/recommenders/rbm_tf_k_faster.py @@ -0,0 +1,218 @@ +# https://udemy.com/recommender-systems +# https://deeplearningcourses.com/recommender-systems +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import tensorflow as tf +import matplotlib.pyplot as plt +from sklearn.utils import shuffle + +import pandas as pd +from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz +from datetime import datetime + + +def dot1(V, W): + # V is N x D x K (batch of visible units) + # W is D x K x M (weights) + # returns N x M (hidden layer size) + return tf.tensordot(V, W, axes=[[1,2], [0,1]]) + +def dot2(H, W): + # H is N x M (batch of hiddens) + # W is D x K x M (weights transposed) + # returns N x D x K (visible) + return tf.tensordot(H, W, axes=[[1], [2]]) + + +class RBM(object): + def __init__(self, D, M, K): + self.D = D # input feature size + self.M = M # hidden size + self.K = K # number of ratings + self.build(D, M, K) + + + def build(self, D, M, K): + # params + self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) + self.c = tf.Variable(np.zeros(M).astype(np.float32)) + self.b = tf.Variable(np.zeros((D, K)).astype(np.float32)) + + # data + self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + + # one hot encode X + # first, make each rating an int + X = tf.cast(self.X_in * 2 - 1, tf.int32) + X = tf.one_hot(X, K) + + # conditional probabilities + # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2 + V = X + p_h_given_v = tf.nn.sigmoid(dot1(V, self.W) + self.c) + self.p_h_given_v = p_h_given_v # save for later + + # draw a sample from p(h | v) + r = tf.random_uniform(shape=tf.shape(p_h_given_v)) + H = tf.to_float(r < p_h_given_v) + + # draw a sample from p(v | h) + # note: we don't have to actually do the softmax + logits = dot2(H, self.W) + self.b + cdist = tf.distributions.Categorical(logits=logits) + X_sample = cdist.sample() # shape is (N, D) + X_sample = tf.one_hot(X_sample, depth=self.K) # turn it into (N, D, K) + + # mask X_sample to remove missing ratings + mask2d = 
tf.cast(self.X_in > 0, tf.float32) + mask3d = tf.stack([mask2d]*K, axis=-1) # repeat K times in last dimension + X_sample = X_sample * mask3d + + + # build the objective + objective = tf.reduce_mean(self.free_energy(X)) - tf.reduce_mean(self.free_energy(X_sample)) + self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) + + # build the cost + # we won't use this to optimize the model parameters + # just to observe what happens during training + logits = self.forward_logits(X) + self.cost = tf.reduce_mean( + tf.nn.softmax_cross_entropy_with_logits( + labels=X, + logits=logits, + ) + ) + + # to get the output + self.output_visible = self.forward_output(X) + + + # for calculating SSE + self.one_to_ten = tf.constant(one_to_ten.astype(np.float32) / 2) + self.pred = tf.tensordot(self.output_visible, self.one_to_ten, axes=[[2], [0]]) + mask = tf.cast(self.X_in > 0, tf.float32) + se = mask * (self.X_in - self.pred) * (self.X_in - self.pred) + self.sse = tf.reduce_sum(se) + + # test SSE + self.X_test = tf.placeholder(tf.float32, shape=(None, D)) + mask = tf.cast(self.X_test > 0, tf.float32) + tse = mask * (self.X_test - self.pred) * (self.X_test - self.pred) + self.tsse = tf.reduce_sum(tse) + + + initop = tf.global_variables_initializer() + self.session = tf.Session() + self.session.run(initop) + + def fit(self, X, X_test, epochs=10, batch_sz=256, show_fig=True): + N, D = X.shape + n_batches = N // batch_sz + + + costs = [] + test_costs = [] + for i in range(epochs): + t0 = datetime.now() + print("epoch:", i) + X, X_test = shuffle(X, X_test) # everything has to be shuffled accordingly + for j in range(n_batches): + x = X[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + + _, c = self.session.run( + (self.train_op, self.cost), + feed_dict={self.X_in: x} + ) + + if j % 100 == 0: + print("j / n_batches:", j, "/", n_batches, "cost:", c) + print("duration:", datetime.now() - t0) + + # calculate the true train and test cost + t0 = datetime.now() + sse = 0 + test_sse = 0 + n = 0 + test_n = 0 + for j in range(n_batches): + x = X[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + xt = X_test[j*batch_sz:(j*batch_sz + batch_sz)].toarray() + + # number of train ratings + n += np.count_nonzero(x) + + # number of test ratings + test_n += np.count_nonzero(xt) + + # use tensorflow to get SSEs + sse_j, tsse_j = self.get_sse(x, xt) + sse += sse_j + test_sse += tsse_j + c = sse/n + ct = test_sse/test_n + print("train mse:", c) + print("test mse:", ct) + print("calculate cost duration:", datetime.now() - t0) + costs.append(c) + test_costs.append(ct) + if show_fig: + plt.plot(costs, label='train mse') + plt.plot(test_costs, label='test mse') + plt.legend() + plt.show() + + def free_energy(self, V): + first_term = -tf.reduce_sum(dot1(V, self.b)) + second_term = -tf.reduce_sum( + # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), + tf.nn.softplus(dot1(V, self.W) + self.c), + axis=1 + ) + return first_term + second_term + + def forward_hidden(self, X): + return tf.nn.sigmoid(dot1(X, self.W) + self.c) + + def forward_logits(self, X): + Z = self.forward_hidden(X) + return dot2(Z, self.W) + self.b + + def forward_output(self, X): + return tf.nn.softmax(self.forward_logits(X)) + + def transform(self, X): + # accepts and returns a real numpy array + # unlike forward_hidden and forward_output + # which deal with tensorflow variables + return self.session.run(self.p_h_given_v, feed_dict={self.X_in: X}) + + def get_visible(self, X): + return 
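# Aside: a plain-numpy sketch (standalone, not part of the class above) of the
# rating encoding these RBM scripts rely on. MovieLens ratings come in half
# stars 0.5..5.0 with 0 meaning "not rated", so r*2 - 1 maps them to the
# integer categories 0..9 (K=10); a missing rating maps to -1, which
# tf.one_hot turns into an all-zeros row, so missing inputs need no explicit
# mask. The predicted rating is then the expected value over the K slots,
# which is what self.pred computes (and what convert_probs_to_ratings in
# rbm_tf_k.py is used for).
import numpy as np

ratings = np.array([0.0, 0.5, 3.0, 5.0])      # 0.0 = not rated
cats = (ratings * 2 - 1).astype(int)          # [-1, 0, 5, 9]

K = 10
one_hot = np.zeros((len(ratings), K))
for n, c in enumerate(cats):
    if c >= 0:                                # -1 (missing) stays all zeros
        one_hot[n, c] = 1

half_stars = (np.arange(K) + 1) / 2.0         # [0.5, 1.0, ..., 5.0]
print(one_hot.dot(half_stars))                # [0.  0.5 3.  5. ]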
self.session.run(self.output_visible, feed_dict={self.X_in: X}) + + def get_sse(self, X, Xt): + return self.session.run( + (self.sse, self.tsse), + feed_dict={ + self.X_in: X, + self.X_test: Xt, + }) + + + +def main(): + A = load_npz("Atrain.npz") + A_test = load_npz("Atest.npz") + + N, M = A.shape + rbm = RBM(M, 50, 10) + rbm.fit(A, A_test) + + +if __name__ == '__main__': + main() From c9449a6b631db0a4d2c29fd3355dca1e7961e503 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 20 Sep 2018 15:57:39 -0400 Subject: [PATCH 093/329] update --- recommenders/rbm_tf_k.py | 2 +- recommenders/rbm_tf_k_faster.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/recommenders/rbm_tf_k.py b/recommenders/rbm_tf_k.py index 4c2f6f05..6043085e 100644 --- a/recommenders/rbm_tf_k.py +++ b/recommenders/rbm_tf_k.py @@ -107,7 +107,7 @@ def build(self, D, M, K): logits = dot2(H, self.W) + self.b cdist = tf.distributions.Categorical(logits=logits) X_sample = cdist.sample() # shape is (N, D) - X_sample = tf.one_hot(X_sample, depth=self.K) # turn it into (N, D, K) + X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K) X_sample = X_sample * self.mask # missing ratings shouldn't contribute to objective diff --git a/recommenders/rbm_tf_k_faster.py b/recommenders/rbm_tf_k_faster.py index df812ba6..75100ba0 100644 --- a/recommenders/rbm_tf_k_faster.py +++ b/recommenders/rbm_tf_k_faster.py @@ -65,7 +65,7 @@ def build(self, D, M, K): logits = dot2(H, self.W) + self.b cdist = tf.distributions.Categorical(logits=logits) X_sample = cdist.sample() # shape is (N, D) - X_sample = tf.one_hot(X_sample, depth=self.K) # turn it into (N, D, K) + X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K) # mask X_sample to remove missing ratings mask2d = tf.cast(self.X_in > 0, tf.float32) @@ -94,7 +94,7 @@ def build(self, D, M, K): # for calculating SSE - self.one_to_ten = tf.constant(one_to_ten.astype(np.float32) / 2) + self.one_to_ten = tf.constant((np.arange(10) + 1).astype(np.float32) / 2) self.pred = tf.tensordot(self.output_visible, self.one_to_ten, axes=[[2], [0]]) mask = tf.cast(self.X_in > 0, tf.float32) se = mask * (self.X_in - self.pred) * (self.X_in - self.pred) From c06bde607e395b4c6ff23de99c738d71065b0cf2 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 22 Sep 2018 01:47:06 -0400 Subject: [PATCH 094/329] update --- ann_class2/keras_functional.py | 78 ++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 ann_class2/keras_functional.py diff --git a/ann_class2/keras_functional.py b/ann_class2/keras_functional.py new file mode 100644 index 00000000..b8c1a793 --- /dev/null +++ b/ann_class2/keras_functional.py @@ -0,0 +1,78 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Model +from keras.layers import Dense, Input +from util import get_normalized_data, y2indicator + +import matplotlib.pyplot as plt + +# NOTE: do NOT name your file keras.py because it will conflict +# with importing keras + +# installation is easy! 
just the usual "sudo pip(3) install keras" + + +# get the data, same as Theano + Tensorflow examples +# no need to split now, the fit() function will do it +Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() + +# get shapes +N, D = Xtrain.shape +K = len(set(Ytrain)) + +# by default Keras wants one-hot encoded labels +# there's another cost function we can use +# where we can just pass in the integer labels directly +# just like Tensorflow / Theano +Ytrain = y2indicator(Ytrain) +Ytest = y2indicator(Ytest) + + +# ANN with layers [784] -> [500] -> [300] -> [10] +i = Input(shape=(D,)) +x = Dense(500, activation='relu')(i) +x = Dense(300, activation='relu')(x) +x = Dense(K, activation='softmax')(x) +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + From 551283f487f05954ee116f3ccddbe4815ab5808f Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 2 Oct 2018 00:02:03 -0400 Subject: [PATCH 095/329] update --- hmm_class/hmm_classifier.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hmm_class/hmm_classifier.py b/hmm_class/hmm_classifier.py index 7c7ca381..8fa62939 100644 --- a/hmm_class/hmm_classifier.py +++ b/hmm_class/hmm_classifier.py @@ -24,13 +24,14 @@ def __init__(self): def fit(self, X, Y, V): K = len(set(Y)) # number of classes - assume 0..K-1 + N = len(Y) self.models = [] self.priors = [] for k in range(K): # gather all the training data for this class thisX = [x for x, y in zip(X, Y) if y == k] C = len(thisX) - self.priors.append(np.log(C)) + self.priors.append(np.log(C) - np.log(N)) hmm = HMM(5) hmm.fit(thisX, V=V, print_period=1, learning_rate=1e-2, max_iter=80) From 6563bb707fcb19eccec8511fb383b9a12951173c Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 3 Oct 2018 14:26:12 -0400 Subject: [PATCH 096/329] update --- recommenders/mf.py | 6 +++--- recommenders/mf2.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/recommenders/mf.py b/recommenders/mf.py index 7864c64a..6507da83 100644 --- a/recommenders/mf.py +++ b/recommenders/mf.py @@ -65,7 +65,7 @@ def get_loss(d): # train the parameters epochs = 25 -reg = 0.01 # regularization penalty +reg =20. 
# regularization penalty train_losses = [] test_losses = [] for epoch in range(epochs): @@ -90,7 +90,7 @@ def get_loss(d): # set the updates W[i] = np.linalg.solve(matrix, vector) - b[i] = bi / ((1 + reg)*len(user2movie[i])) + b[i] = bi / (len(user2movie[i]) + reg) if i % (N//10) == 0: print("i:", i, "N:", N) @@ -114,7 +114,7 @@ def get_loss(d): # set the updates U[j] = np.linalg.solve(matrix, vector) - c[j] = cj / ((1 + reg)*len(movie2user[j])) + c[j] = cj / (len(movie2user[j]) + reg) if j % (M//10) == 0: print("j:", j, "M:", M) diff --git a/recommenders/mf2.py b/recommenders/mf2.py index 72dc5db7..62b599c6 100644 --- a/recommenders/mf2.py +++ b/recommenders/mf2.py @@ -91,7 +91,7 @@ def get_loss(m2u): # train the parameters epochs = 25 -reg = 0.1 # regularization penalty +reg = 20. # regularization penalty train_losses = [] test_losses = [] for epoch in range(epochs): @@ -109,7 +109,7 @@ def get_loss(m2u): # set the updates W[i] = np.linalg.solve(matrix, vector) - b[i] = bi / ((1 + reg)*len(user2movie[i])) + b[i] = bi / (len(user2movie[i]) + reg) if i % (N//10) == 0: print("i:", i, "N:", N) @@ -127,7 +127,7 @@ def get_loss(m2u): # set the updates U[j] = np.linalg.solve(matrix, vector) - c[j] = cj / ((1 + reg)*len(movie2user[j])) + c[j] = cj / (len(movie2user[j]) + reg) if j % (M//10) == 0: print("j:", j, "M:", M) From 26ad2a7e29286e7a6e7fd2005c7bbd718cdc58f9 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 4 Oct 2018 01:26:57 -0400 Subject: [PATCH 097/329] add reading --- supervised_class2/extra_reading.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 supervised_class2/extra_reading.txt diff --git a/supervised_class2/extra_reading.txt b/supervised_class2/extra_reading.txt new file mode 100644 index 00000000..f826cb04 --- /dev/null +++ b/supervised_class2/extra_reading.txt @@ -0,0 +1,11 @@ +RANDOM FORESTS +https://www.stat.berkeley.edu/~breiman/randomforest2001.pdf + +A Short Introduction to Boosting +https://cseweb.ucsd.edu/~yfreund/papers/IntroToBoosting.pdf + +Explaining AdaBoost +http://rob.schapire.net/papers/explaining-adaboost.pdf + +Improved Boosting Algorithms Using Confidence-rated Predictions +https://sci2s.ugr.es/keel/pdf/algorithm/articulo/1999-ML-Improved%20boosting%20algorithms%20using%20confidence-rated%20predictions%20(Schapire%20y%20Singer).pdf \ No newline at end of file From 3c7584c2f5747fbcfb4d0d3bebc053f541072faa Mon Sep 17 00:00:00 2001 From: Mac User Date: Fri, 5 Oct 2018 01:44:43 -0400 Subject: [PATCH 098/329] update --- ann_class2/rmsprop_test.py | 134 +++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 ann_class2/rmsprop_test.py diff --git a/ann_class2/rmsprop_test.py b/ann_class2/rmsprop_test.py new file mode 100644 index 00000000..0d4bd481 --- /dev/null +++ b/ann_class2/rmsprop_test.py @@ -0,0 +1,134 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.models import Sequential +from keras.layers import Dense, Activation +from util import get_normalized_data, y2indicator + +import matplotlib.pyplot as plt +import numpy as np +import tensorflow as tf + +import theano +import theano.tensor as T + + +# RMSprop experiment +# to compare TF / Keras / Theano + +N = 10 +D = 2 +X = np.random.randn(N, D).astype(np.float32) +w = 
np.array([0.5, -0.5], dtype=np.float32) +Y = X.dot(w) + 1 +Y = Y.reshape(-1, 1) + + + +# keras +# the model will be a sequence of layers +model = Sequential() +model.add(Dense(units=1, input_dim=D)) + + +# copy the weights for later +weights = model.layers[0].get_weights() +w0 = weights[0].copy() +b0 = weights[1].copy() + + +model.compile( + loss='mean_squared_error', + optimizer='rmsprop', +) + + +r = model.fit(X, Y, epochs=15, batch_size=10) + + +# print the available keys +print(r.history.keys()) + + + +# tf +inputs = tf.placeholder(tf.float32, shape=(None, 2)) +targets = tf.placeholder(tf.float32, shape=(None, 1)) +tfw = tf.Variable(w0) +tfb = tf.Variable(b0) +pred = tf.matmul(inputs, tfw) + tfb + +loss = tf.reduce_mean(tf.square(targets - pred)) +train_op = tf.train.RMSPropOptimizer(1e-3, epsilon=1e-8).minimize(loss) + +tflosses = [] +init = tf.global_variables_initializer() +with tf.Session() as sess: + sess.run(init) + for e in range(15): + _, l = sess.run([train_op, loss], feed_dict={inputs: X, targets: Y}) + tflosses.append(l) + + + +# theano +def rmsprop(cost, params, lr=1e-3, decay=0.9, eps=1e-8): + # return updates + lr = np.float32(lr) + decay = np.float32(decay) + eps = np.float32(eps) + + updates = [] + grads = T.grad(cost, params) + + # tf-like + # caches = [theano.shared(np.ones_like(p.get_value(), dtype=np.float32)) for p in params] + + # keras-like + caches = [theano.shared(np.zeros_like(p.get_value(), dtype=np.float32)) for p in params] + + new_caches = [] + for c, g in zip(caches, grads): + new_c = decay*c + (np.float32(1) - decay)*g*g + updates.append((c, new_c)) + new_caches.append(new_c) + + for p, new_c, g in zip(params, new_caches, grads): + new_p = p - lr*g / T.sqrt(new_c + eps) + updates.append((p, new_p)) + + return updates + +thX = T.matrix('X') +thY = T.matrix('Y') +thw = theano.shared(w0) +thb = theano.shared(b0) +thP = thX.dot(thw) + thb +cost = T.mean((thY - thP)**2) +params = [thw, thb] +updates = rmsprop(cost, params) + +train_op = theano.function( + inputs=[thX, thY], + outputs=cost, + updates=updates, +) + +thlosses = [] +for e in range(15): + c = train_op(X, Y) + thlosses.append(c) + + +# plot results +plt.plot(r.history['loss'], label='keras loss') +plt.plot(tflosses, label='tf loss') +plt.plot(thlosses, label='theano loss') +plt.legend() +plt.show() + + From 6952b9dbc972c0cdf80e0b59387555a51ced02ad Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 6 Oct 2018 23:00:08 -0400 Subject: [PATCH 099/329] update --- recommenders/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/recommenders/extra_reading.txt b/recommenders/extra_reading.txt index fc0267bb..7b386c0a 100644 --- a/recommenders/extra_reading.txt +++ b/recommenders/extra_reading.txt @@ -10,6 +10,9 @@ https://github.com/reddit-archive/reddit/blob/master/r2/r2/lib/db/_sorts.pyx Revealed: US spy operation that manipulates social media https://www.theguardian.com/technology/2011/mar/17/us-spy-operation-social-networks +5G Got me Fired +https://medium.com/@dvorak/5g-got-me-fired-ce407e584c4a + Learning to rank https://en.wikipedia.org/wiki/Learning_to_rank#Evaluation_measures From a967f4271c723cb2bb9cdc70d6f9004ed5200c81 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 12 Oct 2018 15:56:16 -0400 Subject: [PATCH 100/329] small update --- rl/extra_reading.txt | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index 62d8b0ce..ab7ba7de 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -4,8 +4,35 @@ 
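# Aside: the update rule implemented by the Theano rmsprop() in rmsprop_test.py
# above, restated in plain numpy. The Theano version exposes the one knob being
# compared there (cache initialized to zeros, "keras-like", or ones, "tf-like");
# the step itself is
#   cache <- decay * cache + (1 - decay) * g^2
#   p     <- p - lr * g / sqrt(cache + eps)
import numpy as np

def rmsprop_step(p, g, cache, lr=1e-3, decay=0.9, eps=1e-8):
    cache = decay * cache + (1 - decay) * g * g
    p = p - lr * g / np.sqrt(cache + eps)
    return p, cache

p, cache = np.array([0.5, -0.5]), np.zeros(2)   # zeros-initialized cache
g = np.array([0.1, -0.2])                       # a made-up gradient
p, cache = rmsprop_step(p, g, cache)
print(p, cache)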
http://web.mst.edu/~gosavia/joc.pdf Algorithms for Reinforcement Learning - Csaba Szepesv´ari http://old.sztaki.hu/~szcsaba/papers/RLAlgsInMDPs-lecture.pdf +Markov Decision Processes in Artificial Intelligence +https://zodml.org/sites/default/files/Markov_Decision_Processes_and_Artificial_Intelligence.pdf + +MDP Preliminaries +http://nanjiang.cs.illinois.edu/files/cs598/note1.pdf + +Concentration Inequalities and Multi-Armed Bandits +http://nanjiang.cs.illinois.edu/files/cs598/note_bandit.pdf + +Notes on Tabular Methods +http://nanjiang.cs.illinois.edu/files/cs598/note3.pdf + +Notes on State Abstractions +http://nanjiang.cs.illinois.edu/files/cs598/note4.pdf + +Notes on Fitted Q-iteration +http://nanjiang.cs.illinois.edu/files/cs598/note5.pdf + +Convergence of Stochastic Iterative Dynamic Programming Algorithms +https://papers.nips.cc/paper/764-convergence-of-stochastic-iterative-dynamic-programming-algorithms.pdf + Sutton & Barto http://incompleteideas.net/sutton/book/the-book-2nd.html +Finite-Sample Analysis of Proximal Gradient TD Algorithms +https://marek.petrik.us/pub/Liu2015.pdf + +Finite Sample Analyses for TD(0) with Function Approximation +https://arxiv.org/pdf/1704.01161.pdf + Mastering the game of Go with deep neural networks and tree search - Silver, D. et al. https://storage.googleapis.com/deepmind-media/alphago/AlphaGoNaturePaper.pdf From 020a7a625baa28358293f220c8ffb7fce57286c3 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 20 Oct 2018 21:06:48 -0400 Subject: [PATCH 101/329] add svd --- nlp_class2/extra_reading.txt | 3 + nlp_class2/glove.py | 12 +-- nlp_class2/glove_svd.py | 194 +++++++++++++++++++++++++++++++++++ 3 files changed, 203 insertions(+), 6 deletions(-) create mode 100644 nlp_class2/glove_svd.py diff --git a/nlp_class2/extra_reading.txt b/nlp_class2/extra_reading.txt index bbb408ad..4f769441 100644 --- a/nlp_class2/extra_reading.txt +++ b/nlp_class2/extra_reading.txt @@ -1,3 +1,6 @@ +Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. 
GloVe: Global Vectors for Word Representation +https://nlp.stanford.edu/pubs/glove.pdf + Neural Word Embedding as Implicit Matrix Factorization http://papers.nips.cc/paper/5477-neural-word-embedding-as-implicit-matrix-factorization.pdf diff --git a/nlp_class2/glove.py b/nlp_class2/glove.py index d6f344ed..b46c13f2 100644 --- a/nlp_class2/glove.py +++ b/nlp_class2/glove.py @@ -34,7 +34,7 @@ def __init__(self, D, V, context_sz): self.V = V self.context_sz = context_sz - def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False, use_theano=False, use_tensorflow=False): + def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False): # build co-occurrence matrix # paper calls it X, so we will call it X, instead of calling # the training data X @@ -193,12 +193,12 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # update b for i in range(V): - denominator = fX[i,:].sum() + denominator = fX[i,:].sum() + reg # assert(denominator > 0) numerator = fX[i,:].dot(logX[i,:] - W[i].dot(U.T) - c - mu) # for j in range(V): # numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - c[j]) - b[i] = numerator / denominator / (1 + reg) + b[i] = numerator / denominator # print "updated b" # update U @@ -217,11 +217,11 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, # update c for j in range(V): - denominator = fX[:,j].sum() + denominator = fX[:,j].sum() + reg numerator = fX[:,j].dot(logX[:,j] - W.dot(U[j]) - b - mu) # for i in range(V): # numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - b[i]) - c[j] = numerator / denominator / (1 + reg) + c[j] = numerator / denominator # print "updated c" self.W = W @@ -266,7 +266,7 @@ def main(we_file, w2i_file, use_brown=True, n_files=100): json.dump(word2idx, f) V = len(word2idx) - model = Glove(200, V, 10) + model = Glove(100, V, 10) # alternating least squares method model.fit(sentences, cc_matrix=cc_matrix, epochs=20) diff --git a/nlp_class2/glove_svd.py b/nlp_class2/glove_svd.py new file mode 100644 index 00000000..fa5ec2c1 --- /dev/null +++ b/nlp_class2/glove_svd.py @@ -0,0 +1,194 @@ +# Course URL: +# https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python +# https://udemy.com/natural-language-processing-with-deep-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import os +import json +import numpy as np +import matplotlib.pyplot as plt +from sklearn.decomposition import TruncatedSVD + +from datetime import datetime +from sklearn.utils import shuffle +from util import find_analogies + + +import sys +sys.path.append(os.path.abspath('..')) +from rnn_class.util import get_wikipedia_data +from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx + + +class Glove: + def __init__(self, D, V, context_sz): + self.D = D + self.V = V + self.context_sz = context_sz + + def fit(self, sentences, cc_matrix=None): + # build co-occurrence matrix + # paper calls it X, so we will call it X, instead of calling + # the training data X + # TODO: would it be better to use a sparse matrix? 
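# Aside on the glove.py change above: the new denominator fX[i,:].sum() + reg
# matches the closed-form minimizer of a weighted least-squares objective with
# an L2 penalty reg * b_i^2 on the bias. For a fixed row i,
#   J(b_i) = sum_j f_ij * (logX_ij - w_i.u_j - b_i - c_j - mu)^2 + reg * b_i^2
# and setting dJ/db_i = 0 gives
#   b_i = sum_j f_ij * (logX_ij - w_i.u_j - c_j - mu) / (sum_j f_ij + reg),
# i.e. numerator / (fX[i,:].sum() + reg). A quick numerical sanity check with
# made-up weights and residual targets:
import numpy as np

np.random.seed(0)
f = np.random.rand(5)                 # stand-ins for the weights f_ij
t = np.random.randn(5)                # stand-ins for the residual targets
reg = 0.1
b_closed = f.dot(t) / (f.sum() + reg)

bs = np.linspace(-2, 2, 100001)
J = (f * (t[None, :] - bs[:, None]) ** 2).sum(axis=1) + reg * bs ** 2
print(b_closed, bs[J.argmin()])       # agree up to the grid spacing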
+ t0 = datetime.now() + V = self.V + D = self.D + + if not os.path.exists(cc_matrix): + X = np.zeros((V, V)) + N = len(sentences) + print("number of sentences to process:", N) + it = 0 + for sentence in sentences: + it += 1 + if it % 10000 == 0: + print("processed", it, "/", N) + n = len(sentence) + for i in range(n): + # i is not the word index!!! + # j is not the word index!!! + # i just points to which element of the sequence (sentence) we're looking at + wi = sentence[i] + + start = max(0, i - self.context_sz) + end = min(n, i + self.context_sz) + + # we can either choose only one side as context, or both + # here we are doing both + + # make sure "start" and "end" tokens are part of some context + # otherwise their f(X) will be 0 (denominator in bias update) + if i - self.context_sz < 0: + points = 1.0 / (i + 1) + X[wi,0] += points + X[0,wi] += points + if i + self.context_sz > n: + points = 1.0 / (n - i) + X[wi,1] += points + X[1,wi] += points + + # left side + for j in range(start, i): + wj = sentence[j] + points = 1.0 / (i - j) # this is +ve + X[wi,wj] += points + X[wj,wi] += points + + # right side + for j in range(i + 1, end): + wj = sentence[j] + points = 1.0 / (j - i) # this is +ve + X[wi,wj] += points + X[wj,wi] += points + + # save the cc matrix because it takes forever to create + np.save(cc_matrix, X) + else: + X = np.load(cc_matrix) + + print("max in X:", X.max()) + + # target + logX = np.log(X + 1) + + print("max in log(X):", logX.max()) + + print("time to build co-occurrence matrix:", (datetime.now() - t0)) + + # subtract global mean + mu = logX.mean() + + model = TruncatedSVD(n_components=D) + Z = model.fit_transform(logX - mu) + Sinv = np.linalg.inv(np.diag(model.explained_variance_)) + self.W = Z.dot(Sinv) + self.U = model.components_.T + + # calculate cost once + delta = self.W.dot(self.U.T) + mu - logX + cost = (delta * delta).sum() + print("svd cost:", cost) + + def save(self, fn): + # function word_analogies expects a (V,D) matrx and a (D,V) matrix + arrays = [self.W, self.U.T] + np.savez(fn, *arrays) + + +def main(we_file, w2i_file, use_brown=True, n_files=100): + if use_brown: + cc_matrix = "cc_matrix_brown.npy" + else: + cc_matrix = "cc_matrix_%s.npy" % n_files + + # hacky way of checking if we need to re-load the raw data or not + # remember, only the co-occurrence matrix is needed for training + if os.path.exists(cc_matrix): + with open(w2i_file) as f: + word2idx = json.load(f) + sentences = [] # dummy - we won't actually use it + else: + if use_brown: + keep_words = set([ + 'king', 'man', 'woman', + 'france', 'paris', 'london', 'rome', 'italy', 'britain', 'england', + 'french', 'english', 'japan', 'japanese', 'chinese', 'italian', + 'australia', 'australian', 'december', 'november', 'june', + 'january', 'february', 'march', 'april', 'may', 'july', 'august', + 'september', 'october', + ]) + sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=5000, keep_words=keep_words) + else: + sentences, word2idx = get_wikipedia_data(n_files=n_files, n_vocab=2000) + + with open(w2i_file, 'w') as f: + json.dump(word2idx, f) + + V = len(word2idx) + model = Glove(100, V, 10) + + # alternating least squares method + model.fit(sentences, cc_matrix=cc_matrix) + model.save(we_file) + + +if __name__ == '__main__': + we = 'glove_svd_50.npz' + w2i = 'glove_word2idx_50.json' + # we = 'glove_svd_brown.npz' + # w2i = 'glove_word2idx_brown.json' + main(we, w2i, use_brown=False) + + # load back embeddings + npz = np.load(we) + W1 = npz['arr_0'] + W2 = npz['arr_1'] + + 
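# Aside: fit() above stores W = Z.dot(Sinv) and U = model.components_.T, so
# undoing the Sinv scaling recovers Z, and TruncatedSVD's rank-D reconstruction
# of (logX - mu) is simply Z.dot(model.components_) -- equivalently
# W.dot(S).dot(U.T), the form the follow-up patch below uses for the
# "svd cost" delta. A tiny standalone check on made-up data:
import numpy as np
from sklearn.decomposition import TruncatedSVD

np.random.seed(1)
A = np.random.randn(50, 30)              # stand-in for logX - mu
svd = TruncatedSVD(n_components=5)
Z = svd.fit_transform(A)                 # shape (50, 5)
A_hat = Z.dot(svd.components_)           # rank-5 reconstruction of A
print(((A - A_hat) ** 2).sum())          # summed squared reconstruction error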
with open(w2i) as f: + word2idx = json.load(f) + idx2word = {i:w for w,i in word2idx.items()} + + for concat in (True, False): + print("** concat:", concat) + + if concat: + We = np.hstack([W1, W2.T]) + else: + We = (W1 + W2.T) / 2 + + + find_analogies('king', 'man', 'woman', We, word2idx, idx2word) + find_analogies('france', 'paris', 'london', We, word2idx, idx2word) + find_analogies('france', 'paris', 'rome', We, word2idx, idx2word) + find_analogies('paris', 'france', 'italy', We, word2idx, idx2word) + find_analogies('france', 'french', 'english', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word) + find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word) + find_analogies('december', 'november', 'june', We, word2idx, idx2word) + From 1c29f046dd3871fd23661db35161374fb0af4a10 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 21 Oct 2018 23:11:28 -0400 Subject: [PATCH 102/329] update --- nlp_class2/glove_svd.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nlp_class2/glove_svd.py b/nlp_class2/glove_svd.py index fa5ec2c1..a0fd3c0a 100644 --- a/nlp_class2/glove_svd.py +++ b/nlp_class2/glove_svd.py @@ -105,12 +105,13 @@ def fit(self, sentences, cc_matrix=None): model = TruncatedSVD(n_components=D) Z = model.fit_transform(logX - mu) - Sinv = np.linalg.inv(np.diag(model.explained_variance_)) + S = np.diag(model.explained_variance_) + Sinv = np.linalg.inv(S) self.W = Z.dot(Sinv) self.U = model.components_.T # calculate cost once - delta = self.W.dot(self.U.T) + mu - logX + delta = self.W.dot(S).dot(self.U.T) + mu - logX cost = (delta * delta).sum() print("svd cost:", cost) From edbfe0e0840e2bea01bbed73a9ed85f61499b8d2 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 25 Oct 2018 23:00:14 -0400 Subject: [PATCH 103/329] update --- nlp_class3/convert_twitter.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 nlp_class3/convert_twitter.py diff --git a/nlp_class3/convert_twitter.py b/nlp_class3/convert_twitter.py new file mode 100644 index 00000000..477c20fd --- /dev/null +++ b/nlp_class3/convert_twitter.py @@ -0,0 +1,23 @@ +# https://deeplearningcourses.com/c/deep-learning-advanced-nlp +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +# each output line should be: +# INPUTRESPONSE +with open('../large_files/twitter_tab_format.txt', 'w') as f: + prev_line = None + # data source: https://github.com/Phylliida/Dialogue-Datasets + for line in open('../large_files/TwitterLowerAsciiCorpus.txt'): + line = line.rstrip() + + if prev_line and line: + f.write("%s\t%s\n" % (prev_line, line)) + + # note: + # between conversations there are empty lines + # which evaluate to false + + prev_line = line From 0baf80daeb7569aa6d097edd3dbf4f2075927b3f Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 16 Nov 2018 02:22:20 -0500 Subject: [PATCH 104/329] update --- recommenders/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/recommenders/extra_reading.txt b/recommenders/extra_reading.txt index 7b386c0a..21d09a4b 100644 --- a/recommenders/extra_reading.txt +++ b/recommenders/extra_reading.txt @@ -53,4 +53,7 @@ Restricted Boltzmann Machines for Collaborative Filtering https://www.cs.toronto.edu/~rsalakhu/papers/rbmcf.pdf AutoRec: Autoencoders Meet Collaborative Filtering 
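# Aside: a tiny dry run of the pairing loop in convert_twitter.py above.
# Consecutive non-empty lines become (input, response) pairs, and the blank
# line between conversations prevents pairing across them. The dialogue here
# is made up; the real script writes the pairs tab-separated to a file.
lines = ["hi", "hello there", "how are you?", "", "nice weather", "sure is"]
prev_line = None
pairs = []
for line in lines:
    if prev_line and line:
        pairs.append((prev_line, line))
    prev_line = line
print(pairs)
# [('hi', 'hello there'), ('hello there', 'how are you?'), ('nice weather', 'sure is')]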
-http://users.cecs.anu.edu.au/~u5098633/papers/www15.pdf \ No newline at end of file +http://users.cecs.anu.edu.au/~u5098633/papers/www15.pdf + +Collaborative Filtering for Implicit Feedback Datasets +http://yifanhu.net/PUB/cf.pdf \ No newline at end of file From 1b26291cc431e583d1eff0c7ed6678f05bfe6af4 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 16 Nov 2018 12:26:27 -0500 Subject: [PATCH 105/329] update --- ann_class2/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ann_class2/extra_reading.txt b/ann_class2/extra_reading.txt index 356dd690..f1825dd0 100644 --- a/ann_class2/extra_reading.txt +++ b/ann_class2/extra_reading.txt @@ -22,4 +22,7 @@ http://arxiv.org/abs/1502.01852 For understanding Nesterov Momentum: Advances in optimizing Recurrent Networks by Yoshua Bengio, Section 3.5 -http://arxiv.org/pdf/1212.0901v2.pdf \ No newline at end of file +http://arxiv.org/pdf/1212.0901v2.pdf + +Dropout: A Simple Way to Prevent Neural Networks from Overfitting +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf \ No newline at end of file From 32190b79891079a57d34896f0967ae4acca73b69 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 18 Nov 2018 04:51:56 -0500 Subject: [PATCH 106/329] update --- rl2/extra_reading.txt | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/rl2/extra_reading.txt b/rl2/extra_reading.txt index 60983730..b43b6d4e 100644 --- a/rl2/extra_reading.txt +++ b/rl2/extra_reading.txt @@ -1,2 +1,14 @@ Sutton & Barto -http://incompleteideas.net/sutton/book/the-book-2nd.html \ No newline at end of file +http://incompleteideas.net/sutton/book/the-book-2nd.html + +Implementation Details of the TD(λ) Procedure for the Case of Vector Predictions and Backpropagation +http://incompleteideas.net/papers/sutton-89.pdf + +Policy Gradient Methods for Reinforcement Learning with Function Approximation +https://homes.cs.washington.edu/~todorov/courses/amath579/reading/PolicyGradient.pdf + +Playing Atari with Deep Reinforcement Learning +https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf + +Asynchronous Methods for Deep Reinforcement Learning +https://arxiv.org/pdf/1602.01783.pdf From 7ec1b08e18e31383b4316d479449c88b745dfc47 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 8 Dec 2018 21:23:23 -0500 Subject: [PATCH 107/329] update --- rl/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index ab7ba7de..9a2fbc48 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -36,3 +36,6 @@ https://arxiv.org/pdf/1704.01161.pdf Mastering the game of Go with deep neural networks and tree search - Silver, D. et al. 
https://storage.googleapis.com/deepmind-media/alphago/AlphaGoNaturePaper.pdf + +Learning Rates for Q-learning +http://www.jmlr.org/papers/volume5/evendar03a/evendar03a.pdf \ No newline at end of file From 3efb846899a8e8091817f16db479951e418c52f7 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 17 Dec 2018 15:20:16 -0500 Subject: [PATCH 108/329] rename width and height to rows and cols --- rl/grid_world.py | 6 +++--- rl/iterative_policy_evaluation.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rl/grid_world.py b/rl/grid_world.py index 3f35efd2..891b5441 100644 --- a/rl/grid_world.py +++ b/rl/grid_world.py @@ -10,9 +10,9 @@ class Grid: # Environment - def __init__(self, width, height, start): - self.width = width - self.height = height + def __init__(self, rows, cols, start): + self.rows = rows + self.cols = cols self.i = start[0] self.j = start[1] diff --git a/rl/iterative_policy_evaluation.py b/rl/iterative_policy_evaluation.py index 7da1815e..fea8f438 100644 --- a/rl/iterative_policy_evaluation.py +++ b/rl/iterative_policy_evaluation.py @@ -12,9 +12,9 @@ SMALL_ENOUGH = 1e-3 # threshold for convergence def print_values(V, g): - for i in range(g.width): + for i in range(g.rows): print("---------------------------") - for j in range(g.height): + for j in range(g.cols): v = V.get((i,j), 0) if v >= 0: print(" %.2f|" % v, end="") @@ -24,9 +24,9 @@ def print_values(V, g): def print_policy(P, g): - for i in range(g.width): + for i in range(g.rows): print("---------------------------") - for j in range(g.height): + for j in range(g.cols): a = P.get((i,j), ' ') print(" %s |" % a, end="") print("") From 20784e4295ce11e741386af6e9b63527baa388aa Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 20 Dec 2018 22:57:14 -0500 Subject: [PATCH 109/329] update --- ann_class2/rmsprop.py | 4 ++-- ann_class2/sgd.py | 6 +++--- ann_class2/tensorflow2.py | 2 +- ann_class2/theano2.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ann_class2/rmsprop.py b/ann_class2/rmsprop.py index 518d0b98..43afce23 100644 --- a/ann_class2/rmsprop.py +++ b/ann_class2/rmsprop.py @@ -32,7 +32,7 @@ def main(): M = 300 K = 10 - W1 = np.random.randn(D, M) / 28 + W1 = np.random.randn(D, M) / np.sqrt(D) b1 = np.zeros(M) W2 = np.random.randn(M, K) / np.sqrt(M) b2 = np.zeros(K) @@ -71,7 +71,7 @@ def main(): # 2. RMSprop - W1 = np.random.randn(D, M) / 28 + W1 = np.random.randn(D, M) / np.sqrt(D) b1 = np.zeros(M) W2 = np.random.randn(M, K) / np.sqrt(M) b2 = np.zeros(K) diff --git a/ann_class2/sgd.py b/ann_class2/sgd.py index abc77e27..3c338b6d 100644 --- a/ann_class2/sgd.py +++ b/ann_class2/sgd.py @@ -36,7 +36,7 @@ def main(): Ytest_ind = y2indicator(Ytest) # 1. full - W = np.random.randn(D, 10) / 28 + W = np.random.randn(D, 10) / np.sqrt(D) b = np.zeros(10) LL = [] lr = 0.0001 @@ -63,7 +63,7 @@ def main(): # 2. stochastic - W = np.random.randn(D, 10) / 28 + W = np.random.randn(D, 10) / np.sqrt(D) b = np.zeros(10) LL_stochastic = [] lr = 0.0001 @@ -95,7 +95,7 @@ def main(): # 3. 
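# Aside on the randn(...) / np.sqrt(D) initializations in the ann_class2
# patches here: for MNIST, D = 784 and sqrt(784) = 28, so the old hard-coded
# "/ 28" was sqrt(D) in disguise; writing np.sqrt(D) gives the same numbers
# but no longer breaks if D changes. The 1/sqrt(D) scale keeps the
# pre-activation variance near 1 for roughly unit-variance inputs:
import numpy as np

D, M = 784, 300
W = np.random.randn(D, M) / np.sqrt(D)
x = np.random.randn(1000, D)             # standardized, made-up inputs
print(x.dot(W).var())                    # close to 1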
batch - W = np.random.randn(D, 10) / 28 + W = np.random.randn(D, 10) / np.sqrt(D) b = np.zeros(10) LL_batch = [] lr = 0.0001 diff --git a/ann_class2/tensorflow2.py b/ann_class2/tensorflow2.py index 6641f5b0..a07f0104 100644 --- a/ann_class2/tensorflow2.py +++ b/ann_class2/tensorflow2.py @@ -44,7 +44,7 @@ def main(): M1 = 300 M2 = 100 K = 10 - W1_init = np.random.randn(D, M1) / 28 + W1_init = np.random.randn(D, M1) / np.sqrt(D) b1_init = np.zeros(M1) W2_init = np.random.randn(M1, M2) / np.sqrt(M1) b2_init = np.zeros(M2) diff --git a/ann_class2/theano2.py b/ann_class2/theano2.py index 9712af21..3bf29744 100644 --- a/ann_class2/theano2.py +++ b/ann_class2/theano2.py @@ -47,7 +47,7 @@ def main(): M = 300 K = 10 - W1_init = np.random.randn(D, M) / 28 + W1_init = np.random.randn(D, M) / np.sqrt(D) b1_init = np.zeros(M) W2_init = np.random.randn(M, K) / np.sqrt(M) b2_init = np.zeros(K) From d55caef8181428749452ccfdf595b904b6d3be24 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 22 Dec 2018 15:42:12 -0500 Subject: [PATCH 110/329] a3c --- rl2/a3c/main.py | 98 ++++++++++++++ rl2/a3c/nets.py | 122 +++++++++++++++++ rl2/a3c/thread_example.py | 45 +++++++ rl2/a3c/worker.py | 269 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 534 insertions(+) create mode 100644 rl2/a3c/main.py create mode 100644 rl2/a3c/nets.py create mode 100644 rl2/a3c/thread_example.py create mode 100644 rl2/a3c/worker.py diff --git a/rl2/a3c/main.py b/rl2/a3c/main.py new file mode 100644 index 00000000..0e7f88bf --- /dev/null +++ b/rl2/a3c/main.py @@ -0,0 +1,98 @@ +import gym +import sys +import os +import numpy as np +import tensorflow as tf +import matplotlib.pyplot as plt +import itertools +import shutil +import threading +import multiprocessing + +from nets import create_networks +from worker import Worker + + +ENV_NAME = "Breakout-v0" +MAX_GLOBAL_STEPS = 5e6 +STEPS_PER_UPDATE = 5 + + +def Env(): + return gym.envs.make(ENV_NAME) + +# Depending on the game we may have a limited action space +if ENV_NAME == "Pong-v0" or ENV_NAME == "Breakout-v0": + NUM_ACTIONS = 4 # env.action_space.n returns a bigger number +else: + env = Env() + NUM_ACTIONS = env.action_space.n + env.close() + + +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + + +# Set the number of workers +NUM_WORKERS = multiprocessing.cpu_count() + +with tf.device("/cpu:0"): + + # Keeps track of the number of updates we've performed + # https://www.tensorflow.org/api_docs/python/tf/train/global_step + global_step = tf.Variable(0, name="global_step", trainable=False) + + # Global policy and value nets + with tf.variable_scope("global") as vs: + policy_net, value_net = create_networks(NUM_ACTIONS) + + # Global step iterator + global_counter = itertools.count() + + # Save returns + returns_list = [] + + # Create workers + workers = [] + for worker_id in range(NUM_WORKERS): + worker = Worker( + name="worker_{}".format(worker_id), + env=Env(), + policy_net=policy_net, + value_net=value_net, + global_counter=global_counter, + returns_list=returns_list, + discount_factor = 0.99, + max_global_steps=MAX_GLOBAL_STEPS) + workers.append(worker) + +with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + coord = tf.train.Coordinator() + + # Start worker threads + worker_threads = [] + for worker in workers: + worker_fn = lambda: worker.run(sess, coord, STEPS_PER_UPDATE) + t = threading.Thread(target=worker_fn) + t.start() + 
worker_threads.append(t) + + # Wait for all workers to finish + coord.join(worker_threads, stop_grace_period_secs=300) + + # Plot the smoothed returns + x = np.array(returns_list) + y = smooth(x) + plt.plot(x, label='orig') + plt.plot(y, label='smoothed') + plt.legend() + plt.show() + diff --git a/rl2/a3c/nets.py b/rl2/a3c/nets.py new file mode 100644 index 00000000..c10dfd5a --- /dev/null +++ b/rl2/a3c/nets.py @@ -0,0 +1,122 @@ +import tensorflow as tf + + +def build_feature_extractor(input_): + # We only want to create the weights once + # In all future calls we should set reuse = True + + # scale the inputs from 0..255 to 0..1 + input_ = tf.to_float(input_) / 255.0 + + # conv layers + conv1 = tf.contrib.layers.conv2d( + input_, + 16, # num output feature maps + 8, # kernel size + 4, # stride + activation_fn=tf.nn.relu, + scope="conv1") + conv2 = tf.contrib.layers.conv2d( + conv1, + 32, # num output feature maps + 4, # kernel size + 2, # stride + activation_fn=tf.nn.relu, + scope="conv2") + + # image -> feature vector + flat = tf.contrib.layers.flatten(conv2) + + # dense layer + fc1 = tf.contrib.layers.fully_connected( + inputs=flat, + num_outputs=256, + scope="fc1") + + return fc1 + +class PolicyNetwork: + def __init__(self, num_outputs, reg=0.01): + self.num_outputs = num_outputs + + # Graph inputs + # After resizing we have 4 consecutive frames of size 84 x 84 + self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X") + # Advantage = G - V(s) + self.advantage = tf.placeholder(shape=[None], dtype=tf.float32, name="y") + # Selected actions + self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions") + + # Since we set reuse=False here, that means we MUST + # create the PolicyNetwork before creating the ValueNetwork + # ValueNetwork will use reuse=True + with tf.variable_scope("shared", reuse=False): + fc1 = build_feature_extractor(self.states) + + # Use a separate scope for output and loss + with tf.variable_scope("policy_network"): + self.logits = tf.contrib.layers.fully_connected(fc1, num_outputs, activation_fn=None) + self.probs = tf.nn.softmax(self.logits) + + # Sample an action + cdist = tf.distributions.Categorical(logits=self.logits) + self.sample_action = cdist.sample() + + # Add regularization to increase exploration + self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), axis=1) + + # Get the predictions for the chosen actions only + batch_size = tf.shape(self.states)[0] + gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1] + self.actions + self.selected_action_probs = tf.gather(tf.reshape(self.probs, [-1]), gather_indices) + + self.loss = tf.log(self.selected_action_probs) * self.advantage + reg * self.entropy + self.loss = -tf.reduce_sum(self.loss, name="loss") + + # training + self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6) + + # we'll need these later for running gradient descent steps + self.grads_and_vars = self.optimizer.compute_gradients(self.loss) + self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None] + + +class ValueNetwork: + def __init__(self): + # Placeholders for our input + # After resizing we have 4 consecutive frames of size 84 x 84 + self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X") + # The TD target value + self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="y") + + # Since we set reuse=True here, that means we MUST + # create the PolicyNetwork before creating the ValueNetwork + # 
PolictyNetwork will use reuse=False + with tf.variable_scope("shared", reuse=True): + fc1 = build_feature_extractor(self.states) + + # Use a separate scope for output and loss + with tf.variable_scope("value_network"): + self.vhat = tf.contrib.layers.fully_connected( + inputs=fc1, + num_outputs=1, + activation_fn=None) + self.vhat = tf.squeeze(self.vhat, squeeze_dims=[1], name="vhat") + + self.loss = tf.squared_difference(self.vhat, self.targets) + self.loss = tf.reduce_sum(self.loss, name="loss") + + # training + self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6) + + # we'll need these later for running gradient descent steps + self.grads_and_vars = self.optimizer.compute_gradients(self.loss) + self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None] + + +# Should use this to create networks +# to ensure they're created in the correct order +def create_networks(num_outputs): + policy_network = PolicyNetwork(num_outputs=num_outputs) + value_network = ValueNetwork() + return policy_network, value_network diff --git a/rl2/a3c/thread_example.py b/rl2/a3c/thread_example.py new file mode 100644 index 00000000..7d6e18e4 --- /dev/null +++ b/rl2/a3c/thread_example.py @@ -0,0 +1,45 @@ +import itertools +import threading +import time +import multiprocessing +import numpy as np + + +class Worker: + def __init__(self, id_, global_counter): + self.id = id_ + self.global_counter = global_counter + self.local_counter = itertools.count() + + def run(self): + while True: + time.sleep(np.random.rand()*2) + global_step = next(self.global_counter) + local_step = next(self.local_counter) + print("Worker({}): {}".format(self.id, local_step)) + if global_step >= 20: + break + +global_counter = itertools.count() +NUM_WORKERS = multiprocessing.cpu_count() + +# create the workers +workers = [] +for worker_id in range(NUM_WORKERS): + worker = Worker(worker_id, global_counter) + workers.append(worker) + +# start the threads +worker_threads = [] +for worker in workers: + worker_fn = lambda: worker.run() + t = threading.Thread(target=worker_fn) + t.start() + worker_threads.append(t) + + +# join the threads +# for t in worker_threads: +# t.join() + +print("DONE!") \ No newline at end of file diff --git a/rl2/a3c/worker.py b/rl2/a3c/worker.py new file mode 100644 index 00000000..bb7d82a8 --- /dev/null +++ b/rl2/a3c/worker.py @@ -0,0 +1,269 @@ +import gym +import sys +import os +import numpy as np +import tensorflow as tf + +from nets import create_networks + + +class Step: + def __init__(self, state, action, reward, next_state, done): + self.state = state + self.action = action + self.reward = reward + self.next_state = next_state + self.done = done + + +# Transform raw images for input into neural network +# 1) Convert to grayscale +# 2) Resize +# 3) Crop +class ImageTransformer: + def __init__(self): + with tf.variable_scope("image_transformer"): + self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8) + self.output = tf.image.rgb_to_grayscale(self.input_state) + self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, 160, 160) + self.output = tf.image.resize_images( + self.output, + [84, 84], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) + self.output = tf.squeeze(self.output) + + def transform(self, state, sess=None): + sess = sess or tf.get_default_session() + return sess.run(self.output, { self.input_state: state }) + + +# Create initial state by repeating the same frame 4 times +def repeat_frame(frame): + return np.stack([frame] * 4, 
axis=2) + + +# Create next state by shifting each frame by 1 +# Throw out the oldest frame +# And concatenate the newest frame +def shift_frames(state, next_frame): + return np.append(state[:,:,1:], np.expand_dims(next_frame, 2), axis=2) + + +# Make a Tensorflow op to copy weights from one scope to another +def get_copy_params_op(src_vars, dst_vars): + src_vars = list(sorted(src_vars, key=lambda v: v.name)) + dst_vars = list(sorted(dst_vars, key=lambda v: v.name)) + + ops = [] + for s, d in zip(src_vars, dst_vars): + op = d.assign(s) + ops.append(op) + + return ops + + +def make_train_op(local_net, global_net): + """ + Use gradients from local network to update the global network + """ + + # Idea: + # We want a list of gradients and corresponding variables + # e.g. [[g1, g2, g3], [v1, v2, v3]] + # Since that's what the optimizer expects. + # But we would like the gradients to come from the local network + # And the variables to come from the global network + # So we want to make a list like this: + # [[local_g1, local_g2, local_g3], [global_v1, global_v2, global_v3]] + + # First get only the gradients + local_grads, _ = zip(*local_net.grads_and_vars) + + # Clip gradients to avoid large values + local_grads, _ = tf.clip_by_global_norm(local_grads, 5.0) + + # Get global vars + _, global_vars = zip(*global_net.grads_and_vars) + + # Combine local grads and global vars + local_grads_global_vars = list(zip(local_grads, global_vars)) + + # Run a gradient descent step, e.g. + # var = var - learning_rate * grad + return global_net.optimizer.apply_gradients( + local_grads_global_vars, + global_step=tf.train.get_global_step()) + + +# Worker object to be run in a thread +# name (String) should be unique for each thread +# env (OpenAI Gym Environment) should be unique for each thread +# policy_net (PolicyNetwork) should be a global passed to every worker +# value_net (ValueNetwork) should be a global passed to every worker +# returns_list (List) should be a global passed to every worker +class Worker: + def __init__( + self, + name, + env, + policy_net, + value_net, + global_counter, + returns_list, + discount_factor=0.99, + max_global_steps=None): + + self.name = name + self.env = env + self.global_policy_net = policy_net + self.global_value_net = value_net + self.global_counter = global_counter + self.discount_factor = discount_factor + self.max_global_steps = max_global_steps + self.global_step = tf.train.get_global_step() + self.img_transformer = ImageTransformer() + + # Create local policy and value networks that belong only to this worker + with tf.variable_scope(name): + # self.policy_net = PolicyNetwork(num_outputs=policy_net.num_outputs) + # self.value_net = ValueNetwork() + self.policy_net, self.value_net = create_networks(policy_net.num_outputs) + + # We will use this op to copy the global network weights + # back to the local policy and value networks + self.copy_params_op = get_copy_params_op( + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="global"), + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/')) + + # These will take the gradients from the local networks + # and use those gradients to update the global network + self.vnet_train_op = make_train_op(self.value_net, self.global_value_net) + self.pnet_train_op = make_train_op(self.policy_net, self.global_policy_net) + + self.state = None # Keep track of the current state + self.total_reward = 0. 
# After each episode print the total (sum of) reward + self.returns_list = returns_list # Global returns list to plot later + + def run(self, sess, coord, t_max): + with sess.as_default(), sess.graph.as_default(): + # Assign the initial state + self.state = repeat_frame(self.img_transformer.transform(self.env.reset())) + + try: + while not coord.should_stop(): + # Copy weights from global networks to local networks + sess.run(self.copy_params_op) + + # Collect some experience + steps, global_step = self.run_n_steps(t_max, sess) + + # Stop once the max number of global steps has been reached + if self.max_global_steps is not None and global_step >= self.max_global_steps: + coord.request_stop() + return + + # Update the global networks using local gradients + self.update(steps, sess) + + except tf.errors.CancelledError: + return + + def sample_action(self, state, sess): + # Make input N x D (N = 1) + feed_dict = { self.policy_net.states: [state] } + actions = sess.run(self.policy_net.sample_action, feed_dict) + # Prediction is a 1-D array of length N, just want the first value + return actions[0] + + def get_value_prediction(self, state, sess): + # Make input N x D (N = 1) + feed_dict = { self.value_net.states: [state] } + vhat = sess.run(self.value_net.vhat, feed_dict) + # Prediction is a 1-D array of length N, just want the first value + return vhat[0] + + def run_n_steps(self, n, sess): + steps = [] + for _ in range(n): + # Take a step + action = self.sample_action(self.state, sess) + next_frame, reward, done, _ = self.env.step(action) + + # Shift the state to include the latest frame + next_state = shift_frames(self.state, self.img_transformer.transform(next_frame)) + + # Save total return + if done: + print("Total reward:", self.total_reward, "Worker:", self.name) + self.returns_list.append(self.total_reward) + if len(self.returns_list) > 0 and len(self.returns_list) % 100 == 0: + print("*** Total average reward (last 100):", np.mean(self.returns_list[-100:]), "Collected so far:", len(self.returns_list)) + self.total_reward = 0. + else: + self.total_reward += reward + + # Save step + step = Step(self.state, action, reward, next_state, done) + steps.append(step) + + # Increase local and global counters + global_step = next(self.global_counter) + + if done: + self.state = repeat_frame(self.img_transformer.transform(self.env.reset())) + break + else: + self.state = next_state + return steps, global_step + + def update(self, steps, sess): + """ + Updates global policy and value networks using the local networks' gradients + """ + + # In order to accumulate the total return + # We will use V_hat(s') to predict the future returns + # But we will use the actual rewards if we have them + # Ex. 
if we have s1, s2, s3 with rewards r1, r2, r3 + # Then G(s3) = r3 + V(s4) + # G(s2) = r2 + r3 + V(s4) + # G(s1) = r1 + r2 + r3 + V(s4) + reward = 0.0 + if not steps[-1].done: + reward = self.get_value_prediction(steps[-1].next_state, sess) + + # Accumulate minibatch samples + states = [] + advantages = [] + value_targets = [] + actions = [] + + # loop through steps in reverse order + for step in reversed(steps): + reward = step.reward + self.discount_factor * reward + advantage = reward - self.get_value_prediction(step.state, sess) + # Accumulate updates + states.append(step.state) + actions.append(step.action) + advantages.append(advantage) + value_targets.append(reward) + + feed_dict = { + self.policy_net.states: np.array(states), + self.policy_net.advantage: advantages, + self.policy_net.actions: actions, + self.value_net.states: np.array(states), + self.value_net.targets: value_targets, + } + + # Train the global estimators using local gradients + global_step, pnet_loss, vnet_loss, _, _ = sess.run([ + self.global_step, + self.policy_net.loss, + self.value_net.loss, + self.pnet_train_op, + self.vnet_train_op, + ], feed_dict) + + # Theoretically could plot these later + return pnet_loss, vnet_loss From 76139acd0249eff9e44e6229d6b4c2dac3c5f48d Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 23 Dec 2018 00:38:52 -0500 Subject: [PATCH 111/329] add back join --- rl2/a3c/thread_example.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rl2/a3c/thread_example.py b/rl2/a3c/thread_example.py index 7d6e18e4..0d5fa1a4 100644 --- a/rl2/a3c/thread_example.py +++ b/rl2/a3c/thread_example.py @@ -39,7 +39,7 @@ def run(self): # join the threads -# for t in worker_threads: -# t.join() +for t in worker_threads: + t.join() print("DONE!") \ No newline at end of file From a375c2f9964d793c635d9f1b6873676af59ae968 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 18 Jan 2019 04:22:10 -0500 Subject: [PATCH 112/329] svm --- svm_class/extra_reading.txt | 47 ++++ svm_class/fake_neural_net.py | 72 ++++++ svm_class/kernel_svm_gradient_primal.py | 195 +++++++++++++++ svm_class/linear_svm_gradient.py | 149 ++++++++++++ svm_class/rbfnetwork.py | 62 +++++ svm_class/real_neural_net.py | 31 +++ svm_class/regression.py | 38 +++ svm_class/svm_gradient.py | 152 ++++++++++++ svm_class/svm_medical.py | 28 +++ svm_class/svm_mnist.py | 22 ++ svm_class/svm_smo.py | 307 ++++++++++++++++++++++++ svm_class/svm_spam.py | 83 +++++++ svm_class/util.py | 143 +++++++++++ 13 files changed, 1329 insertions(+) create mode 100644 svm_class/extra_reading.txt create mode 100644 svm_class/fake_neural_net.py create mode 100644 svm_class/kernel_svm_gradient_primal.py create mode 100644 svm_class/linear_svm_gradient.py create mode 100644 svm_class/rbfnetwork.py create mode 100644 svm_class/real_neural_net.py create mode 100644 svm_class/regression.py create mode 100644 svm_class/svm_gradient.py create mode 100644 svm_class/svm_medical.py create mode 100644 svm_class/svm_mnist.py create mode 100644 svm_class/svm_smo.py create mode 100644 svm_class/svm_spam.py create mode 100644 svm_class/util.py diff --git a/svm_class/extra_reading.txt b/svm_class/extra_reading.txt new file mode 100644 index 00000000..2538434d --- /dev/null +++ b/svm_class/extra_reading.txt @@ -0,0 +1,47 @@ +Pattern Recognition and Machine Learning +https://amzn.to/2DeexU0 + +Learning with Kernels: Support Vector Machines, Regularization, Optimization, and Beyond +https://amzn.to/2FdyP2s + +Convex Optimization +http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf + +A 
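# Aside: a dry run of the reversed return computation in Worker.update()
# above (rl2/a3c/worker.py). With a bootstrap value for the state after the
# last step and gamma as the discount, the backward loop yields the n-step
# return for every step in the batch. The numbers are made up.
gamma = 0.99
rewards = [1.0, 0.0, 2.0]          # r1, r2, r3
V_last = 0.5                       # value prediction for the state after s3

G = V_last
returns = []
for r in reversed(rewards):
    G = r + gamma * G
    returns.append(G)
returns.reverse()
print(returns)
# G(s3) = 2.0 + 0.99 * 0.5      = 2.495
# G(s2) = 0.0 + 0.99 * 2.495    = 2.47005
# G(s1) = 1.0 + 0.99 * 2.47005  = 3.4453495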
Tutorial on Support Vector Machines for Pattern Recognition +https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/svmtutorial.pdf + +Sequential Minimal Optimization: A Fast Algorithm for Training Support Vector Machines +https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf + +Fast Training of Support Vector Machines using Sequential Minimal Optimization +https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf + +Convex Analysis +https://press.princeton.edu/titles/1815.html + +Generalized Lagrange multiplier method for solving problems of optimum allocation of resources +https://web.archive.org/web/20110724151508/http://or.journal.informs.org/cgi/reprint/11/3/399 + +Duality in Linear Programming +http://www.civilized.com/files/duality.pdf + +Linear programming +https://en.wikipedia.org/wiki/Linear_programming#Duality + +Karush–Kuhn–Tucker conditions +https://en.wikipedia.org/wiki/Karush–Kuhn–Tucker_conditions + +A Study on Sigmoid Kernels for SVM and the Training of non-PSD Kernels by SMO-type Methods +https://www.researchgate.net/publication/2478380_A_Study_on_Sigmoid_Kernels_for_SVM_and_the_Training_of_non-PSD_Kernels_by_SMO-type_Methods + +Text Classification using String Kernels +http://www.jmlr.org/papers/volume2/lodhi02a/lodhi02a.pdf + +A Comparison of Methods for Multi-class Support Vector Machines +https://www.csie.ntu.edu.tw/~cjlin/papers/multisvm.pdf + +A Tutorial on Support Vector Regression +https://alex.smola.org/papers/2003/SmoSch03b.pdf + +LIBSVM -- A Library for Support Vector Machines +https://www.csie.ntu.edu.tw/~cjlin/libsvm/ \ No newline at end of file diff --git a/svm_class/fake_neural_net.py b/svm_class/fake_neural_net.py new file mode 100644 index 00000000..4ec37219 --- /dev/null +++ b/svm_class/fake_neural_net.py @@ -0,0 +1,72 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np + +from sklearn.svm import SVC +from util import getKaggleMNIST +from datetime import datetime +from sklearn.pipeline import Pipeline +from sklearn.linear_model import SGDClassifier +from sklearn.svm import LinearSVC +from sklearn.preprocessing import StandardScaler +from sklearn.cluster import KMeans + +# get the data: https://www.kaggle.com/c/digit-recognizer +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + + +class SigmoidFeaturizer: + def __init__(self, gamma=1.0, n_components=100, method='random'): + self.M = n_components + self.gamma = gamma + assert(method in ('random', 'kmeans')) + self.method = method + + def fit(self, X, Y=None): + if self.method == 'random': + N = len(X) + idx = np.random.randint(N, size=self.M) + self.samples = X[idx] + elif self.method == 'kmeans': + print("Fitting kmeans...") + t0 = datetime.now() + kmeans = KMeans(n_clusters=self.M) + kmeans.fit(X) + print("Finished fitting kmeans, duration:", datetime.now() - t0) + self.samples = kmeans.cluster_centers_ + return self + + def transform(self, X): + Z = self.gamma * X.dot(self.samples.T) # (Ntest x D) x (D x Nsamples) -> (Ntest x Nsamples) + return np.tanh(Z) + + def fit_transform(self, X, Y=None): + return self.fit(X, Y).transform(X) + + +# with SGD +pipeline = Pipeline([ + ('scaler', StandardScaler()), + ('sigmoid', SigmoidFeaturizer(gamma=0.05, n_components=2000, method='random')), + ('linear', SGDClassifier(max_iter=1e6, tol=1e-5)) +]) + +# with Linear SVC +# n_components = 3000 +# pipeline = Pipeline([ +# 
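# Aside: a shape check for SigmoidFeaturizer.transform above. It projects each
# input onto M stored "anchor" rows and squashes with tanh, turning X of shape
# (N, D) into features of shape (N, M) for the linear classifier that follows.
# Toy numbers only:
import numpy as np

N, D, M, gamma = 6, 4, 3, 0.05
X = np.random.randn(N, D)
samples = X[np.random.randint(N, size=M)]   # the 'random' method: rows drawn from X
Z = np.tanh(gamma * X.dot(samples.T))
print(Z.shape)                              # (6, 3)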
('scaler', StandardScaler()), +# ('sigmoid', SigmoidFeaturizer(n_components=n_components)), +# ('linear', LinearSVC()) +# ]) + + +t0 = datetime.now() +pipeline.fit(Xtrain, Ytrain) +print("train duration:", datetime.now() - t0) +t0 = datetime.now() +print("train score:", pipeline.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) +t0 = datetime.now() +print("test score:", pipeline.score(Xtest, Ytest), "duration:", datetime.now() - t0) diff --git a/svm_class/kernel_svm_gradient_primal.py b/svm_class/kernel_svm_gradient_primal.py new file mode 100644 index 00000000..1be107e1 --- /dev/null +++ b/svm_class/kernel_svm_gradient_primal.py @@ -0,0 +1,195 @@ +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.model_selection import train_test_split +from sklearn.datasets import load_breast_cancer +from sklearn.preprocessing import StandardScaler +from datetime import datetime +from util import get_spiral, get_xor, get_donut, get_clouds + +import numpy as np +import matplotlib.pyplot as plt + + +# kernels +def linear(X1, X2, c=0): + return X1.dot(X2.T) + c + +def rbf(X1, X2, gamma=None): + if gamma is None: + gamma = 1.0 / X1.shape[-1] # 1 / D + # gamma = 0.05 + # gamma = 5. # for donut and spiral + if np.ndim(X1) == 1 and np.ndim(X2) == 1: + result = np.exp(-gamma * np.linalg.norm(X1 - X2)**2) + elif (np.ndim(X1) > 1 and np.ndim(X2) == 1) or (np.ndim(X1) == 1 and np.ndim(X2) > 1): + result = np.exp(-gamma * np.linalg.norm(X1 - X2, axis=1)**2) + elif np.ndim(X1) > 1 and np.ndim(X2) > 1: + result = np.exp(-gamma * np.linalg.norm(X1[:, np.newaxis] - X2[np.newaxis, :], axis=2)**2) + return result + +def sigmoid(X1, X2, gamma=0.05, c=1): + return np.tanh(gamma * X1.dot(X2.T) + c) + + +class KernelSVM: + def __init__(self, kernel=linear, C=1.0): + self.C = C + self.kernel = kernel + + def _objective(self, margins): + return 0.5 * self.u.dot(self.K.dot(self.u)) + \ + self.C * np.maximum(0, 1 - margins).sum() + + def fit(self, X, Y, lr=1e-5, n_iters=400): + N, D = X.shape + self.N = N + self.u = np.random.randn(N) + self.b = 0 + + # setup kernel matrix + self.X = X + self.Y = Y + self.K = self.kernel(X, X) + + # gradient descent + losses = [] + for _ in range(n_iters): + margins = Y * (self.u.dot(self.K) + self.b) + loss = self._objective(margins) + losses.append(loss) + + idx = np.where(margins < 1)[0] + grad_u = self.K.dot(self.u) - self.C * Y[idx].dot(self.K[idx]) + self.u -= lr * grad_u + grad_b = -self.C * Y[idx].sum() + self.b -= lr * grad_b + + self.support_ = np.where((Y * (self.u.dot(self.K) + self.b)) <= 1)[0] + print("num SVs:", len(self.support_)) + + # print("w:", self.w) + # print("b:", self.b) + + # hist of margins + m = Y * (self.u.dot(self.K) + self.b) + plt.hist(m, bins=20) + plt.show() + + plt.plot(losses) + plt.title("loss per iteration") + plt.show() + + def _decision_function(self, X): + return self.u.dot(self.kernel(self.X, X)) + self.b + + def predict(self, X): + return np.sign(self._decision_function(X)) + + def score(self, X, Y): + P = self.predict(X) + return np.mean(Y == P) + + +def plot_decision_boundary(model, X, Y, resolution=100, colors=('b', 'k', 'r')): + np.warnings.filterwarnings('ignore') + fig, ax = plt.subplots() + + # Generate coordinate grid of shape [resolution x resolution] + # and evaluate the model over the entire space + x_range = np.linspace(X[:,0].min(), X[:,0].max(), resolution) + y_range 
= np.linspace(X[:,1].min(), X[:,1].max(), resolution) + grid = [[model._decision_function(np.array([[xr, yr]])) for yr in y_range] for xr in x_range] + grid = np.array(grid).reshape(len(x_range), len(y_range)) + + # Plot decision contours using grid and + # make a scatter plot of training data + ax.contour(x_range, y_range, grid.T, (-1, 0, 1), linewidths=(1, 1, 1), + linestyles=('--', '-', '--'), colors=colors) + ax.scatter(X[:,0], X[:,1], + c=Y, lw=0, alpha=0.3, cmap='seismic') + + # Plot support vectors (non-zero alphas) + # as circled points (linewidth > 0) + mask = model.support_ + ax.scatter(X[:,0][mask], X[:,1][mask], + c=Y[mask], cmap='seismic') + + # debug + ax.scatter([0], [0], c='black', marker='x') + + # debug + # x_axis = np.linspace(X[:,0].min(), X[:,0].max(), 100) + # w = model.w + # b = model.b + # # w[0]*x + w[1]*y + b = 0 + # y_axis = -(w[0]*x_axis + b)/w[1] + # plt.plot(x_axis, y_axis, color='purple') + # margin_p = (1 - w[0]*x_axis - b)/w[1] + # plt.plot(x_axis, margin_p, color='orange') + # margin_n = -(1 + w[0]*x_axis + b)/w[1] + # plt.plot(x_axis, margin_n, color='orange') + + plt.show() + + +def clouds(): + X, Y = get_clouds() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, linear, 1e-5, 500 + + +def medical(): + data = load_breast_cancer() + X, Y = data.data, data.target + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, linear, 1e-3, 200 + +def xor(): + X, Y = get_xor() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + kernel = lambda X1, X2: rbf(X1, X2, gamma=3.) + return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-3, 500 + +def donut(): + X, Y = get_donut() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + kernel = lambda X1, X2: rbf(X1, X2, gamma=1.) + return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-3, 300 + +def spiral(): + X, Y = get_spiral() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + kernel = lambda X1, X2: rbf(X1, X2, gamma=5.) 
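+    # each dataset helper returns (Xtrain, Xtest, Ytrain, Ytest, kernel, lr, n_iters),
+    # unpacked in __main__ below; gamma=5 is much larger than the default 1/D, giving a
+    # narrower, more local RBF kernel, which the interleaved spiral arms require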
+ return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-3, 500 + + +if __name__ == '__main__': + Xtrain, Xtest, Ytrain, Ytest, kernel, lr, n_iters = donut() + print("Possible labels:", set(Ytrain)) + + # make sure the targets are (-1, +1) + Ytrain[Ytrain == 0] = -1 + Ytest[Ytest == 0] = -1 + + # scale the data + scaler = StandardScaler() + Xtrain = scaler.fit_transform(Xtrain) + Xtest = scaler.transform(Xtest) + + # now we'll use our custom implementation + model = KernelSVM(kernel=kernel, C=1.0) + + t0 = datetime.now() + model.fit(Xtrain, Ytrain, lr=lr, n_iters=n_iters) + print("train duration:", datetime.now() - t0) + t0 = datetime.now() + print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) + t0 = datetime.now() + print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) + + if Xtrain.shape[1] == 2: + plot_decision_boundary(model, Xtrain, Ytrain) diff --git a/svm_class/linear_svm_gradient.py b/svm_class/linear_svm_gradient.py new file mode 100644 index 00000000..6fcccae3 --- /dev/null +++ b/svm_class/linear_svm_gradient.py @@ -0,0 +1,149 @@ +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.model_selection import train_test_split +from sklearn.datasets import load_breast_cancer +from sklearn.preprocessing import StandardScaler +from datetime import datetime +from util import get_clouds + +import numpy as np +import matplotlib.pyplot as plt + + +class LinearSVM: + def __init__(self, C=1.0): + self.C = C + + def _objective(self, margins): + return 0.5 * self.w.dot(self.w) + self.C * np.maximum(0, 1 - margins).sum() + + def fit(self, X, Y, lr=1e-5, n_iters=400): + N, D = X.shape + self.N = N + self.w = np.random.randn(D) + self.b = 0 + + # gradient descent + losses = [] + for _ in range(n_iters): + margins = Y * self._decision_function(X) + loss = self._objective(margins) + losses.append(loss) + + idx = np.where(margins < 1)[0] + grad_w = self.w - self.C * Y[idx].dot(X[idx]) + self.w -= lr * grad_w + grad_b = -self.C * Y[idx].sum() + self.b -= lr * grad_b + + self.support_ = np.where((Y * self._decision_function(X)) <= 1)[0] + print("num SVs:", len(self.support_)) + + print("w:", self.w) + print("b:", self.b) + + # hist of margins + # m = Y * self._decision_function(X) + # plt.hist(m, bins=20) + # plt.show() + + plt.plot(losses) + plt.title("loss per iteration") + plt.show() + + def _decision_function(self, X): + return X.dot(self.w) + self.b + + def predict(self, X): + return np.sign(self._decision_function(X)) + + def score(self, X, Y): + P = self.predict(X) + return np.mean(Y == P) + + +def plot_decision_boundary(model, X, Y, resolution=100, colors=('b', 'k', 'r')): + np.warnings.filterwarnings('ignore') + fig, ax = plt.subplots() + + # Generate coordinate grid of shape [resolution x resolution] + # and evaluate the model over the entire space + x_range = np.linspace(X[:,0].min(), X[:,0].max(), resolution) + y_range = np.linspace(X[:,1].min(), X[:,1].max(), resolution) + grid = [[model._decision_function(np.array([[xr, yr]])) for yr in y_range] for xr in x_range] + grid = np.array(grid).reshape(len(x_range), len(y_range)) + + # Plot decision contours using grid and + # make a scatter plot of training data + ax.contour(x_range, y_range, grid.T, (-1, 0, 1), linewidths=(1, 1, 1), + linestyles=('--', '-', '--'), colors=colors) + ax.scatter(X[:,0], X[:,1], + c=Y, lw=0, 
alpha=0.3, cmap='seismic') + + # Plot support vectors (non-zero alphas) + # as circled points (linewidth > 0) + mask = model.support_ + ax.scatter(X[:,0][mask], X[:,1][mask], + c=Y[mask], cmap='seismic') + + # debug + ax.scatter([0], [0], c='black', marker='x') + + # debug + # x_axis = np.linspace(X[:,0].min(), X[:,0].max(), 100) + # w = model.w + # b = model.b + # # w[0]*x + w[1]*y + b = 0 + # y_axis = -(w[0]*x_axis + b)/w[1] + # plt.plot(x_axis, y_axis, color='purple') + # margin_p = (1 - w[0]*x_axis - b)/w[1] + # plt.plot(x_axis, margin_p, color='orange') + # margin_n = -(1 + w[0]*x_axis + b)/w[1] + # plt.plot(x_axis, margin_n, color='orange') + + plt.show() + + +def clouds(): + X, Y = get_clouds() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, 1e-3, 200 + + +def medical(): + data = load_breast_cancer() + X, Y = data.data, data.target + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, 1e-3, 200 + + +if __name__ == '__main__': + Xtrain, Xtest, Ytrain, Ytest, lr, n_iters = clouds() + print("Possible labels:", set(Ytrain)) + + # make sure the targets are (-1, +1) + Ytrain[Ytrain == 0] = -1 + Ytest[Ytest == 0] = -1 + + # scale the data + scaler = StandardScaler() + Xtrain = scaler.fit_transform(Xtrain) + Xtest = scaler.transform(Xtest) + + # now we'll use our custom implementation + model = LinearSVM(C=1.0) + + t0 = datetime.now() + model.fit(Xtrain, Ytrain, lr=lr, n_iters=n_iters) + print("train duration:", datetime.now() - t0) + t0 = datetime.now() + print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) + t0 = datetime.now() + print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) + + if Xtrain.shape[1] == 2: + plot_decision_boundary(model, Xtrain, Ytrain) diff --git a/svm_class/rbfnetwork.py b/svm_class/rbfnetwork.py new file mode 100644 index 00000000..e847666f --- /dev/null +++ b/svm_class/rbfnetwork.py @@ -0,0 +1,62 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.svm import SVC +from util import getKaggleMNIST +from datetime import datetime +from sklearn.pipeline import Pipeline +from sklearn.kernel_approximation import RBFSampler +from sklearn.linear_model import SGDClassifier +from sklearn.svm import LinearSVC +from sklearn.pipeline import FeatureUnion +from sklearn.preprocessing import StandardScaler +from sklearn.kernel_approximation import Nystroem + +# get the data: https://www.kaggle.com/c/digit-recognizer +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# linear SGD classifier +# pipeline = Pipeline([('linear', SGDClassifier(max_iter=1e6, tol=1e-5))]) + +# linear SVC - a bit faster than SVC with linear kernel +# pipeline = Pipeline([('linear', LinearSVC())]) + +# one RBFSampler with linear SGD classifier +# pipeline = Pipeline([ +# ('rbf', RBFSampler(gamma=0.01, n_components=1000)), +# ('linear', SGDClassifier(max_iter=1e6, tol=1e-5))]) + +# multiple RBFSamplers +# n_components = 2000 +# featurizer = FeatureUnion([ +# ("rbf1", RBFSampler(gamma=0.01, n_components=n_components)), +# ("rbf2", RBFSampler(gamma=0.005, n_components=n_components)), +# ("rbf3", RBFSampler(gamma=0.001, n_components=n_components)), +# ]) +# pipeline = Pipeline([('rbf', featurizer), ('linear', SGDClassifier(max_iter=1e6, tol=1e-5))]) + +# Nystroem approximation +# pipeline = Pipeline([ +# 
('rbf', Nystroem(gamma=0.05, n_components=1000)), +# ('linear', SGDClassifier(max_iter=1e6, tol=1e-5))]) + +# multiple Nystroem +n_components = 1000 +featurizer = FeatureUnion([ + ("rbf0", Nystroem(gamma=0.05, n_components=n_components)), + ("rbf1", Nystroem(gamma=0.01, n_components=n_components)), + ("rbf2", Nystroem(gamma=0.005, n_components=n_components)), + ("rbf3", Nystroem(gamma=0.001, n_components=n_components)), + ]) +pipeline = Pipeline([('rbf', featurizer), ('linear', SGDClassifier(max_iter=1e6, tol=1e-5))]) + + +t0 = datetime.now() +pipeline.fit(Xtrain, Ytrain) +print("train duration:", datetime.now() - t0) +t0 = datetime.now() +print("train score:", pipeline.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) +t0 = datetime.now() +print("test score:", pipeline.score(Xtest, Ytest), "duration:", datetime.now() - t0) diff --git a/svm_class/real_neural_net.py b/svm_class/real_neural_net.py new file mode 100644 index 00000000..2fef89ab --- /dev/null +++ b/svm_class/real_neural_net.py @@ -0,0 +1,31 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np + +from util import getKaggleMNIST +from datetime import datetime +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.neural_network import MLPClassifier + +# get the data: https://www.kaggle.com/c/digit-recognizer +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# scale first +pipeline = Pipeline([ + # ('scaler', StandardScaler()), + ('mlp', MLPClassifier(hidden_layer_sizes=(500,), activation='tanh')), +]) + + + +t0 = datetime.now() +pipeline.fit(Xtrain, Ytrain) +print("train duration:", datetime.now() - t0) +t0 = datetime.now() +print("train score:", pipeline.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) +t0 = datetime.now() +print("test score:", pipeline.score(Xtest, Ytest), "duration:", datetime.now() - t0) diff --git a/svm_class/regression.py b/svm_class/regression.py new file mode 100644 index 00000000..1fb7b4b2 --- /dev/null +++ b/svm_class/regression.py @@ -0,0 +1,38 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from datetime import datetime +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVR + +# get the data: https://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength +df = pd.read_excel('../large_files/Concrete_Data.xls') +df.columns = list(range(df.shape[1])) + +X = df[[0,1,2,3,4,5,6,7]].values +Y = df[8].values + +# split the data into train and test sets +# this lets us simulate how our model will perform in the future +Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + +# scale the data +scaler = StandardScaler() +Xtrain = scaler.fit_transform(Xtrain) +Xtest = scaler.transform(Xtest) + +target_scaler = StandardScaler() +Ytrain = target_scaler.fit_transform(Ytrain.reshape(-1, 1)).flatten() +Ytest = target_scaler.transform(Ytest.reshape(-1, 1)).flatten() + +model = SVR(kernel='rbf') +model.fit(Xtrain, Ytrain) +print("train score:", model.score(Xtrain, Ytrain)) +print("test score:", model.score(Xtest, Ytest)) diff --git a/svm_class/svm_gradient.py b/svm_class/svm_gradient.py new file mode 100644 index 00000000..65c9b2c1 
--- /dev/null +++ b/svm_class/svm_gradient.py @@ -0,0 +1,152 @@ +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.model_selection import train_test_split +from sklearn.datasets import load_breast_cancer +from sklearn.preprocessing import StandardScaler +from datetime import datetime +from util import get_spiral, get_xor, get_donut, get_clouds, plot_decision_boundary + +import numpy as np +import matplotlib.pyplot as plt + + +# kernels +def linear(X1, X2, c=0): + return X1.dot(X2.T) + c + +def rbf(X1, X2, gamma=None): + if gamma is None: + gamma = 1.0 / X1.shape[-1] # 1 / D + # gamma = 0.05 + # gamma = 5. # for donut and spiral + if np.ndim(X1) == 1 and np.ndim(X2) == 1: + result = np.exp(-gamma * np.linalg.norm(X1 - X2)**2) + elif (np.ndim(X1) > 1 and np.ndim(X2) == 1) or (np.ndim(X1) == 1 and np.ndim(X2) > 1): + result = np.exp(-gamma * np.linalg.norm(X1 - X2, axis=1)**2) + elif np.ndim(X1) > 1 and np.ndim(X2) > 1: + result = np.exp(-gamma * np.linalg.norm(X1[:, np.newaxis] - X2[np.newaxis, :], axis=2)**2) + return result + +def sigmoid(X1, X2, gamma=0.05, c=1): + return np.tanh(gamma * X1.dot(X2.T) + c) + + +class SVM: + def __init__(self, kernel, C=1.0): + self.kernel = kernel + self.C = C + + def _train_objective(self): + return np.sum(self.alphas) - 0.5 * np.sum(self.YYK * np.outer(self.alphas, self.alphas)) + + def fit(self, X, Y, lr=1e-5, n_iters=400): + # we need these to make future predictions + self.Xtrain = X + self.Ytrain = Y + self.N = X.shape[0] + self.alphas = np.random.random(self.N) + self.b = 0 + + # kernel matrix + self.K = self.kernel(X, X) + self.YY = np.outer(Y, Y) + self.YYK = self.K * self.YY + + # gradient ascent + losses = [] + for _ in range(n_iters): + loss = self._train_objective() + losses.append(loss) + grad = np.ones(self.N) - self.YYK.dot(self.alphas) + self.alphas += lr * grad + + # clip + self.alphas[self.alphas < 0] = 0 + self.alphas[self.alphas > self.C] = self.C + + # distrbution of bs + idx = np.where((self.alphas) > 0 & (self.alphas < self.C))[0] + bs = Y[idx] - (self.alphas * Y).dot(self.kernel(X, X[idx])) + self.b = np.mean(bs) + + plt.plot(losses) + plt.title("loss per iteration") + plt.show() + + def _decision_function(self, X): + return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X)) + self.b + + def predict(self, X): + return np.sign(self._decision_function(X)) + + def score(self, X, Y): + P = self.predict(X) + return np.mean(Y == P) + + +def medical(): + data = load_breast_cancer() + X, Y = data.data, data.target + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, rbf, 1e-3, 200 + +def medical_sigmoid(): + data = load_breast_cancer() + X, Y = data.data, data.target + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, sigmoid, 1e-3, 200 + +def xor(): + X, Y = get_xor() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + kernel = lambda X1, X2: rbf(X1, X2, gamma=5.) + return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-2, 300 + +def donut(): + X, Y = get_donut() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + kernel = lambda X1, X2: rbf(X1, X2, gamma=5.) 
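+    # the two classes are concentric rings, so they are not linearly separable in the
+    # original 2-D space; a fairly narrow RBF kernel (gamma=5) makes them separable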
+ return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-2, 300 + +def spiral(): + X, Y = get_spiral() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + kernel = lambda X1, X2: rbf(X1, X2, gamma=5.) + return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-2, 300 + +def clouds(): + X, Y = get_clouds() + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest, linear, 1e-5, 400 + + +if __name__ == '__main__': + Xtrain, Xtest, Ytrain, Ytest, kernel, lr, n_iters = spiral() + print("Possible labels:", set(Ytrain)) + + # make sure the targets are (-1, +1) + Ytrain[Ytrain == 0] = -1 + Ytest[Ytest == 0] = -1 + + # scale the data + scaler = StandardScaler() + Xtrain = scaler.fit_transform(Xtrain) + Xtest = scaler.transform(Xtest) + + # now we'll use our custom implementation + model = SVM(kernel=kernel, C=1.0) + + t0 = datetime.now() + model.fit(Xtrain, Ytrain, lr=lr, n_iters=n_iters) + print("train duration:", datetime.now() - t0) + t0 = datetime.now() + print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) + t0 = datetime.now() + print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) + + if Xtrain.shape[1] == 2: + plot_decision_boundary(model) diff --git a/svm_class/svm_medical.py b/svm_class/svm_medical.py new file mode 100644 index 00000000..bee52069 --- /dev/null +++ b/svm_class/svm_medical.py @@ -0,0 +1,28 @@ +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.svm import SVC +from sklearn.model_selection import train_test_split +from sklearn.datasets import load_breast_cancer +from sklearn.preprocessing import StandardScaler + +# load the data +data = load_breast_cancer() + +# split the data into train and test sets +# this lets us simulate how our model will perform in the future +Xtrain, Xtest, Ytrain, Ytest = train_test_split(data.data, data.target, test_size=0.33) + +# scale the data +scaler = StandardScaler() +Xtrain = scaler.fit_transform(Xtrain) +Xtest = scaler.transform(Xtest) + +model = SVC(kernel='rbf') +# model = SVC() +model.fit(Xtrain, Ytrain) +print("train score:", model.score(Xtrain, Ytrain)) +print("test score:", model.score(Xtest, Ytest)) diff --git a/svm_class/svm_mnist.py b/svm_class/svm_mnist.py new file mode 100644 index 00000000..663b5847 --- /dev/null +++ b/svm_class/svm_mnist.py @@ -0,0 +1,22 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.svm import SVC +from util import getKaggleMNIST +from datetime import datetime + +# get the data: https://www.kaggle.com/c/digit-recognizer +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# model = SVC() +model = SVC(C=5., gamma=.05) + +t0 = datetime.now() +model.fit(Xtrain, Ytrain) +print("train duration:", datetime.now() - t0) +t0 = datetime.now() +print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) +t0 = datetime.now() +print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) diff --git a/svm_class/svm_smo.py b/svm_class/svm_smo.py new file mode 100644 index 00000000..6ab5c372 --- /dev/null +++ b/svm_class/svm_smo.py @@ -0,0 +1,307 @@ +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# 
Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.model_selection import train_test_split +from sklearn.datasets import load_breast_cancer +from sklearn.preprocessing import StandardScaler +from datetime import datetime +from util import get_spiral, get_xor, get_donut, get_clouds, plot_decision_boundary + +import numpy as np +import matplotlib.pyplot as plt + + +# kernels +def linear(X1, X2): + return X1.dot(X2.T) + +def rbf(X1, X2): + # gamma = 1.0 / X1.shape[-1] # 1 / D + gamma = 5. # for donut and spiral + if np.ndim(X1) == 1 and np.ndim(X2) == 1: + result = np.exp(-gamma * np.linalg.norm(X1 - X2)**2) + elif (np.ndim(X1) > 1 and np.ndim(X2) == 1) or (np.ndim(X1) == 1 and np.ndim(X2) > 1): + result = np.exp(-gamma * np.linalg.norm(X1 - X2, axis=1)**2) + elif np.ndim(X1) > 1 and np.ndim(X2) > 1: + result = np.exp(-gamma * np.linalg.norm(X1[:, np.newaxis] - X2[np.newaxis, :], axis=2)**2) + return result + +def sigmoid(X1, X2, gamma=0.05, c=1): + return np.tanh(gamma * X1.dot(X2.T) + c) + + +class SVM: + def __init__(self, kernel, C=1.0, right=True): + self.kernel = kernel + self.C = C + self.right = right + + def _loss(self, X, Y): + # return -np.sum(self.alphas) + \ + # 0.5 * np.sum(np.outer(Y, Y) * self.kernel(X, X) * np.outer(self.alphas, self.alphas)) + return -np.sum(self.alphas) + \ + 0.5 * np.sum(self.YYK * np.outer(self.alphas, self.alphas)) + + def _take_step(self, i1, i2): + # returns True if model params changed, False otherwise + + # Skip if chosen alphas are the same + if i1 == i2: + return False + + alph1 = self.alphas[i1] + alph2 = self.alphas[i2] + y1 = self.Ytrain[i1] + y2 = self.Ytrain[i2] + E1 = self.errors[i1] + E2 = self.errors[i2] + s = y1 * y2 + + # Compute L & H, the bounds on new possible alpha values + if (y1 != y2): + L = max(0, alph2 - alph1) + H = min(self.C, self.C + alph2 - alph1) + elif (y1 == y2): + L = max(0, alph1 + alph2 - self.C) + H = min(self.C, alph1 + alph2) + if (L == H): + return False + + # Compute kernel & 2nd derivative eta + k11 = self.kernel(self.Xtrain[i1], self.Xtrain[i1]) + k12 = self.kernel(self.Xtrain[i1], self.Xtrain[i2]) + k22 = self.kernel(self.Xtrain[i2], self.Xtrain[i2]) + eta = k11 + k22 - 2 * k12 + + # Usual case - eta is non-negative + if eta > 0: + a2 = alph2 + y2 * (E1 - E2) / eta + # Clip a2 based on bounds L & H + if (a2 < L): + a2 = L + elif (a2 > H): + a2 = H + # else a2 remains unchanged + + # Unusual case - eta is negative + # alpha2 should be set to whichever extreme (L or H) that yields the lowest + # value of the objective + else: + print("***** eta < 0 *****") + # keep it to assign it back later + alphas_i2 = self.alphas[i2] + # alphas_adj = self.alphas.copy() + # alphas_adj[i2] = L + self.alphas[i2] = L + # objective function output with a2 = L + Lobj = self._loss(self.Xtrain, self.Ytrain) + # alphas_adj[i2] = H + self.alphas[i2] = H + # objective function output with a2 = H + Hobj = self._loss(self.Xtrain, self.Ytrain) + if Lobj < Hobj - self.eps: + a2 = L + elif Lobj > Hobj + self.eps: + a2 = H + else: + a2 = alph2 + + # now assign it back + self.alphas[i2] = alphas_i2 + + # Push a2 to 0 or C if very close + if a2 < 1e-8: + a2 = 0.0 + elif a2 > (self.C - 1e-8): + a2 = self.C + + # If examples can't be optimized within epsilon (eps), skip this pair + if (np.abs(a2 - alph2) < self.eps * (a2 + alph2 + self.eps)): + return False + + # Calculate new alpha 1 (a1) + a1 = alph1 + s * (alph2 - a2) + + # Update threshold b to reflect newly calculated alphas + # Calculate both 
possible thresholds + b1 = E1 + y1 * (a1 - alph1) * k11 + y2 * (a2 - alph2) * k12 + self.b + b2 = E2 + y1 * (a1 - alph1) * k12 + y2 * (a2 - alph2) * k22 + self.b + + # Set new threshold based on if a1 or a2 is bound by L and/or H + if 0 < a1 and a1 < self.C: + b_new = b1 + elif 0 < a2 and a2 < self.C: + b_new = b2 + # Average thresholds if both are bound + else: + b_new = (b1 + b2) * 0.5 + + # Update model object with new alphas & threshold + self.alphas[i1] = a1 + self.alphas[i2] = a2 + + # Update error cache + # Error cache for optimized alphas is set to 0 if they're unbound + for index, alph in zip([i1, i2], [a1, a2]): + if 0.0 < alph < self.C: + self.errors[index] = 0.0 + + # Set non-optimized errors based on equation 12.11 in SMO book + # non_opt = [n for n in range(self.N) if (n != i1 and n != i2 and self.alphas[n] < self.C and self.alphas[n] > 0)] #new + non_opt = [n for n in range(self.N) if (n != i1 and n != i2)] # old + self.errors[non_opt] = self.errors[non_opt] + \ + y1*(a1 - alph1)*self.kernel(self.Xtrain[i1], self.Xtrain[non_opt]) + \ + y2*(a2 - alph2)*self.kernel(self.Xtrain[i2], self.Xtrain[non_opt]) + self.b - b_new + + # Update model threshold + self.b = b_new + + return True + + def _examine_example(self, i2): + # returns True (1) if alphas changed, False (0) otherwise + y2 = self.Ytrain[i2] + alph2 = self.alphas[i2] + E2 = self.errors[i2] + r2 = E2 * y2 + + # Proceed if error is within specified tolerance (tol) + if ((r2 < -self.tol and alph2 < self.C) or (r2 > self.tol and alph2 > 0)): + + if len(self.alphas[(self.alphas != 0) & (self.alphas != self.C)]) > 1: + # Use 2nd choice heuristic is choose max difference in error + if self.errors[i2] > 0: + i1 = np.argmin(self.errors) + elif self.errors[i2] <= 0: + i1 = np.argmax(self.errors) + if self._take_step(i1, i2): + return 1 + + # Loop through non-zero and non-C alphas, starting at a random point + # e.g. [1,2,3,4,5] -> [4,5,1,2,3] + for i1 in np.roll(np.where((self.alphas != 0) & (self.alphas != self.C))[0], + np.random.choice(np.arange(self.N))): + if self._take_step(i1, i2): + return 1 + + # loop through all alphas, starting at a random point + for i1 in np.roll(np.arange(self.N), np.random.choice(np.arange(self.N))): + if self._take_step(i1, i2): + return 1 + + return 0 + + def fit(self, X, Y, tol=0.00001, eps=0.01): + # we need these to make future predictions + self.tol = tol + self.eps = eps + self.Xtrain = X + self.Ytrain = Y + self.N = X.shape[0] + self.alphas = np.zeros(self.N) + self.b = 0. 
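+        # error cache: errors[i] = f(x_i) - y_i; SMO uses it to pick the second alpha
+        # and to update the threshold b. With all alphas at 0 and b = 0, the decision
+        # function is identically 0, so the cache simply starts out as -Ytrain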
+ self.errors = self._decision_function(self.Xtrain) - self.Ytrain + + # kernel matrix + self.K = self.kernel(X, X) + self.YY = np.outer(Y, Y) + self.YYK = self.K * self.YY + + iter_ = 0 + numChanged = 0 + examineAll = 1 + losses = [] + + while numChanged > 0 or examineAll: + print("iter:", iter_) + iter_ += 1 + numChanged = 0 + if examineAll: + # loop over all training examples + for i in range(self.alphas.shape[0]): + examine_result = self._examine_example(i) + numChanged += examine_result + if examine_result: + loss = self._loss(self.Xtrain, self.Ytrain) + losses.append(loss) + else: + # loop over examples where alphas are not already at their limits + for i in np.where((self.alphas != 0) & (self.alphas != self.C))[0]: + examine_result = self._examine_example(i) + numChanged += examine_result + if examine_result: + loss = self._loss(self.Xtrain, self.Ytrain) + losses.append(loss) + if examineAll == 1: + examineAll = 0 + elif numChanged == 0: + examineAll = 1 + + plt.plot(losses) + plt.title("loss per iteration") + plt.show() + + def _decision_function(self, X): + if self.right: + return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X)) - self.b + else: + return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X) - self.b) + + + def predict(self, X): + return np.sign(self._decision_function(X)) + + def score(self, X, Y): + P = self.predict(X) + return np.mean(Y == P) + + +def get_data(): + ### medical data + # load the data + # data = load_breast_cancer() + # X, Y = data.data, data.target + + # X, Y = get_xor() + # X, Y = get_donut() + # X, Y = get_spiral() + X, Y = get_clouds() + + # split the data into train and test sets + # this lets us simulate how our model will perform in the future + Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + return Xtrain, Xtest, Ytrain, Ytest + + +if __name__ == '__main__': + # np.random.seed(3) + Xtrain, Xtest, Ytrain, Ytest = get_data() + print("Possible labels:", set(Ytrain)) + + # make sure the targets are (-1, +1) + Ytrain[Ytrain == 0] = -1 + Ytest[Ytest == 0] = -1 + + # scale the data + scaler = StandardScaler() + Xtrain = scaler.fit_transform(Xtrain) + Xtest = scaler.transform(Xtest) + + # now we'll use our custom implementation + for right in (True,): + print("Right:", right) + model = SVM(kernel=linear, right=right) + + t0 = datetime.now() + model.fit(Xtrain, Ytrain) + print("train duration:", datetime.now() - t0) + t0 = datetime.now() + print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) + t0 = datetime.now() + print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) + + if Xtrain.shape[1] == 2: + plot_decision_boundary(model) diff --git a/svm_class/svm_spam.py b/svm_class/svm_spam.py new file mode 100644 index 00000000..af6086c1 --- /dev/null +++ b/svm_class/svm_spam.py @@ -0,0 +1,83 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +from sklearn.svm import SVC +from datetime import datetime +import pandas as pd +import matplotlib.pyplot as plt +from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer +from sklearn.model_selection import train_test_split +from wordcloud import WordCloud + +# data from: +# https://www.kaggle.com/uciml/sms-spam-collection-dataset +# file contains some invalid chars +# depending on which version of pandas you have +# an error may be thrown +df = 
pd.read_csv('../large_files/spam.csv', encoding='ISO-8859-1') + +# drop unnecessary columns +df = df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1) + +# rename columns to something better +df.columns = ['labels', 'data'] + +# create binary labels +df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1}) +Y = df['b_labels'].values + +# try multiple ways of calculating features +tfidf = TfidfVectorizer(decode_error='ignore') +X = tfidf.fit_transform(df['data']) + +# count_vectorizer = CountVectorizer(decode_error='ignore') +# X = count_vectorizer.fit_transform(df['data']) + +# split the data +Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) + +model = SVC(kernel='linear', C=2.) + +t0 = datetime.now() +model.fit(Xtrain, Ytrain) +print("train duration:", datetime.now() - t0) +t0 = datetime.now() +print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) +t0 = datetime.now() +print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) + + + +# visualize the data +def visualize(label): + words = '' + for msg in df[df['labels'] == label]['data']: + msg = msg.lower() + words += msg + ' ' + wordcloud = WordCloud(width=600, height=400).generate(words) + plt.imshow(wordcloud) + plt.axis('off') + plt.title(label) + plt.show() + +visualize('spam') +visualize('ham') + + +# see what we're getting wrong +df['predictions'] = model.predict(X) + +# things that should be spam +print("*** things that should be spam ***") +sneaky_spam = df[(df['predictions'] == 0) & (df['b_labels'] == 1)]['data'] +for msg in sneaky_spam: + print(msg) + +# things that should not be spam +print("*** things that should not be spam ***") +not_actually_spam = df[(df['predictions'] == 1) & (df['b_labels'] == 0)]['data'] +for msg in not_actually_spam: + print(msg) + diff --git a/svm_class/util.py b/svm_class/util.py new file mode 100644 index 00000000..a40b557b --- /dev/null +++ b/svm_class/util.py @@ -0,0 +1,143 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from sklearn.utils import shuffle +from sklearn.preprocessing import StandardScaler + + +def getKaggleMNIST(): + # MNIST data: + # column 0 is labels + # column 1-785 is data, with values 0 .. 255 + # total size of CSV: (42000, 784) + train = pd.read_csv('../large_files/train.csv').values.astype(np.float32) + train = shuffle(train) + + Xtrain = train[:-1000,1:] + Ytrain = train[:-1000,0].astype(np.int32) + + Xtest = train[-1000:,1:] + Ytest = train[-1000:,0].astype(np.int32) + + # scale the data + Xtrain /= 255. + Xtest /= 255. 
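+    # pixel intensities are 0..255, so dividing by 255 puts both splits in [0, 1];
+    # the StandardScaler below is left commented out as an optional alternative scaling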
+ # scaler = StandardScaler() + # Xtrain = scaler.fit_transform(Xtrain) + # Xtest = scaler.transform(Xtest) + + return Xtrain, Ytrain, Xtest, Ytest + + +def get_spiral(): + # Idea: radius -> low...high + # (don't start at 0, otherwise points will be "mushed" at origin) + # angle = low...high proportional to radius + # [0, 2pi/6, 4pi/6, ..., 10pi/6] --> [pi/2, pi/3 + pi/2, ..., ] + # x = rcos(theta), y = rsin(theta) as usual + + radius = np.linspace(1, 10, 100) + thetas = np.empty((6, 100)) + for i in range(6): + start_angle = np.pi*i / 3.0 + end_angle = start_angle + np.pi / 2 + points = np.linspace(start_angle, end_angle, 100) + thetas[i] = points + + # convert into cartesian coordinates + x1 = np.empty((6, 100)) + x2 = np.empty((6, 100)) + for i in range(6): + x1[i] = radius * np.cos(thetas[i]) + x2[i] = radius * np.sin(thetas[i]) + + # inputs + X = np.empty((600, 2)) + X[:,0] = x1.flatten() + X[:,1] = x2.flatten() + + # add noise + X += np.random.randn(600, 2)*0.5 + + # targets + Y = np.array([0]*100 + [1]*100 + [0]*100 + [1]*100 + [0]*100 + [1]*100) + return X, Y + + +def get_xor(): + X = np.zeros((200, 2)) + X[:50] = np.random.random((50, 2)) / 2 + 0.5 # (0.5-1, 0.5-1) + X[50:100] = np.random.random((50, 2)) / 2 # (0-0.5, 0-0.5) + X[100:150] = np.random.random((50, 2)) / 2 + np.array([[0, 0.5]]) # (0-0.5, 0.5-1) + X[150:] = np.random.random((50, 2)) / 2 + np.array([[0.5, 0]]) # (0.5-1, 0-0.5) + Y = np.array([0]*100 + [1]*100) + return X, Y + + +def get_donut(): + N = 200 + R_inner = 5 + R_outer = 10 + + # distance from origin is radius + random normal + # angle theta is uniformly distributed between (0, 2pi) + R1 = np.random.randn(N//2) + R_inner + theta = 2*np.pi*np.random.random(N//2) + X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T + + R2 = np.random.randn(N//2) + R_outer + theta = 2*np.pi*np.random.random(N//2) + X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T + + X = np.concatenate([ X_inner, X_outer ]) + Y = np.array([0]*(N//2) + [1]*(N//2)) + return X, Y + + +def get_clouds(): + N = 1000 + c1 = np.array([2, 2]) + c2 = np.array([-2, -2]) + # c1 = np.array([0, 3]) + # c2 = np.array([0, 0]) + X1 = np.random.randn(N, 2) + c1 + X2 = np.random.randn(N, 2) + c2 + X = np.vstack((X1, X2)) + Y = np.array([-1]*N + [1]*N) + return X, Y + + +def plot_decision_boundary(model, resolution=100, colors=('b', 'k', 'r')): + np.warnings.filterwarnings('ignore') + fig, ax = plt.subplots() + + # Generate coordinate grid of shape [resolution x resolution] + # and evaluate the model over the entire space + x_range = np.linspace(model.Xtrain[:,0].min(), model.Xtrain[:,0].max(), resolution) + y_range = np.linspace(model.Xtrain[:,1].min(), model.Xtrain[:,1].max(), resolution) + grid = [[model._decision_function(np.array([[xr, yr]])) for yr in y_range] for xr in x_range] + grid = np.array(grid).reshape(len(x_range), len(y_range)) + + # Plot decision contours using grid and + # make a scatter plot of training data + ax.contour(x_range, y_range, grid.T, (-1, 0, 1), linewidths=(1, 1, 1), + linestyles=('--', '-', '--'), colors=colors) + ax.scatter(model.Xtrain[:,0], model.Xtrain[:,1], + c=model.Ytrain, lw=0, alpha=0.3, cmap='seismic') + + # Plot support vectors (non-zero alphas) + # as circled points (linewidth > 0) + mask = model.alphas > 0. 
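+    # points with non-zero dual coefficients are the support vectors; re-plot them at
+    # full opacity on top of the faded (alpha=0.3) training scatter so they stand out
+    # (assumes the model exposes .alphas, .Xtrain and .Ytrain, as the SVM classes here do)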
+ ax.scatter(model.Xtrain[:,0][mask], model.Xtrain[:,1][mask], + c=model.Ytrain[mask], cmap='seismic') + + # debug + ax.scatter([0], [0], c='black', marker='x') + + plt.show() From afadbeeb380a89cb11da1d30c5efd0822910677b Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 18 Jan 2019 22:00:56 -0500 Subject: [PATCH 113/329] update --- cnn_class2/style_transfer3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cnn_class2/style_transfer3.py b/cnn_class2/style_transfer3.py index bc12d49c..8f383d1d 100644 --- a/cnn_class2/style_transfer3.py +++ b/cnn_class2/style_transfer3.py @@ -78,7 +78,7 @@ def load_img_and_preprocess(path, shape=None): # we only want 1 output # remember you can call vgg.summary() to see a list of layers # 1,2,4,5,7-9,11-13,15-17 -content_model = Model(vgg.input, vgg.layers[13].get_output_at(1)) +content_model = Model(vgg.input, vgg.layers[13].get_output_at(0)) content_target = K.variable(content_model.predict(content_img)) From d4149e58ca5759f9d6d900dfe71d89a34bf87497 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 20 Jan 2019 01:55:22 -0500 Subject: [PATCH 114/329] update readme --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ff892440..1fa924e4 100644 --- a/README.md +++ b/README.md @@ -69,4 +69,10 @@ Deep Learning: Advanced Computer Vision https://deeplearningcourses.com/c/advanced-computer-vision Deep Learning: Advanced NLP and RNNs -https://deeplearningcourses.com/c/deep-learning-advanced-nlp \ No newline at end of file +https://deeplearningcourses.com/c/deep-learning-advanced-nlp + +Recommender Systems and Deep Learning in Python +https://deeplearningcourses.com/c/recommender-systems + +Machine Learning and AI: Support Vector Machines in Python +https://deeplearningcourses.com/c/support-vector-machines-in-python From 25eb869e2b41c33af0316d256ffb2b1900ab8a66 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 20 Jan 2019 01:57:28 -0500 Subject: [PATCH 115/329] update --- README.md | 60 +++++++++++++++++++++++++++---------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 1fa924e4..34ef4ab7 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,36 @@ Find associated courses at https://deeplearningcourses.com Direct Course Links =================== +Recommender Systems and Deep Learning in Python +https://deeplearningcourses.com/c/recommender-systems + +Machine Learning and AI: Support Vector Machines in Python +https://deeplearningcourses.com/c/support-vector-machines-in-python + +Deep Learning: Advanced Computer Vision +https://deeplearningcourses.com/c/advanced-computer-vision + +Deep Learning: Advanced NLP and RNNs +https://deeplearningcourses.com/c/deep-learning-advanced-nlp + +Deep Learning: GANs and Variational Autoencoders +https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders + +Advanced AI: Deep Reinforcement Learning in Python +https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python + +Artificial Intelligence: Reinforcement Learning in Python +https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python + +Natural Language Processing with Deep Learning in Python +https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python + +Deep Learning: Recurrent Neural Networks in Python +https://deeplearningcourses.com/c/deep-learning-recurrent-neural-networks-in-python + +Unsupervised Machine Learning: Hidden Markov Models in Python 
+https://deeplearningcourses.com/c/unsupervised-machine-learning-hidden-markov-models-in-python + Deep Learning Prerequisites: The Numpy Stack in Python https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python @@ -46,33 +76,3 @@ https://deeplearningcourses.com/c/deep-learning-convolutional-neural-networks-th Unsupervised Deep Learning in Python https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python - -Unsupervised Machine Learning: Hidden Markov Models in Python -https://deeplearningcourses.com/c/unsupervised-machine-learning-hidden-markov-models-in-python - -Deep Learning: Recurrent Neural Networks in Python -https://deeplearningcourses.com/c/deep-learning-recurrent-neural-networks-in-python - -Advanced Natural Language Processing: Deep Learning in Python -https://deeplearningcourses.com/c/natural-language-processing-with-deep-learning-in-python - -Artificial Intelligence: Reinforcement Learning in Python -https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python - -Advanced AI: Deep Reinforcement Learning in Python -https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python - -Deep Learning: GANs and Variational Autoencoders -https://deeplearningcourses.com/c/deep-learning-gans-and-variational-autoencoders - -Deep Learning: Advanced Computer Vision -https://deeplearningcourses.com/c/advanced-computer-vision - -Deep Learning: Advanced NLP and RNNs -https://deeplearningcourses.com/c/deep-learning-advanced-nlp - -Recommender Systems and Deep Learning in Python -https://deeplearningcourses.com/c/recommender-systems - -Machine Learning and AI: Support Vector Machines in Python -https://deeplearningcourses.com/c/support-vector-machines-in-python From 877acd6c002e0867ec1279ae2ca7cf717ab1e15f Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 21 Jan 2019 03:55:16 -0500 Subject: [PATCH 116/329] update --- svm_class/crossval.py | 23 +++++++++++ svm_class/extra_reading.txt | 17 +++++++- svm_class/fake_neural_net.py | 77 ++++++++++++++++++++++++++++-------- 3 files changed, 100 insertions(+), 17 deletions(-) create mode 100644 svm_class/crossval.py diff --git a/svm_class/crossval.py b/svm_class/crossval.py new file mode 100644 index 00000000..a740d54a --- /dev/null +++ b/svm_class/crossval.py @@ -0,0 +1,23 @@ +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from datetime import datetime +from sklearn.model_selection import cross_val_score +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.datasets import load_breast_cancer +from sklearn.svm import SVC + +# load the data +data = load_breast_cancer() + +for C in (0.5, 1.0, 5.0, 10.0): + pipeline = Pipeline([('scaler', StandardScaler()), ('svm', SVC(C=C))]) + scores = cross_val_score(pipeline, data.data, data.target, cv=5) + print("C:", C, "mean:", scores.mean(), "std:", scores.std()) diff --git a/svm_class/extra_reading.txt b/svm_class/extra_reading.txt index 2538434d..e9f46c12 100644 --- a/svm_class/extra_reading.txt +++ b/svm_class/extra_reading.txt @@ -44,4 +44,19 @@ A Tutorial on Support Vector Regression https://alex.smola.org/papers/2003/SmoSch03b.pdf LIBSVM -- A Library for Support Vector Machines -https://www.csie.ntu.edu.tw/~cjlin/libsvm/ \ No newline at end of file +https://www.csie.ntu.edu.tw/~cjlin/libsvm/ + 
+Random Features for Large-Scale Kernel Machines +http://www.robots.ox.ac.uk/~vgg/rg/papers/randomfeatures.pdf + +Reflections on Random Kitchen Sinks +http://www.argmin.net/2017/12/05/kitchen-sinks/ + +Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning +https://papers.nips.cc/paper/3495-weighted-sums-of-random-kitchen-sinks-replacing-minimization-with-randomization-in-learning + +Using the Nyström Method to Speed Up Kernel Machines +https://papers.nips.cc/paper/1866-using-the-nystrom-method-to-speed-up-kernel-machines + +Nyström Method vs Random Fourier Features: A Theoretical and Empirical Comparison +https://papers.nips.cc/paper/4588-nystrom-method-vs-random-fourier-features-a-theoretical-and-empirical-comparison \ No newline at end of file diff --git a/svm_class/fake_neural_net.py b/svm_class/fake_neural_net.py index 4ec37219..6f6a8419 100644 --- a/svm_class/fake_neural_net.py +++ b/svm_class/fake_neural_net.py @@ -13,44 +13,83 @@ from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans - -# get the data: https://www.kaggle.com/c/digit-recognizer -Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() +from sklearn.mixture import GaussianMixture +from sklearn.model_selection import cross_val_score +from sklearn.utils import shuffle +from scipy import stats class SigmoidFeaturizer: def __init__(self, gamma=1.0, n_components=100, method='random'): self.M = n_components self.gamma = gamma - assert(method in ('random', 'kmeans')) + assert(method in ('random', 'kmeans', 'gmm')) self.method = method + def _subsample_data(self, X, Y, n=10000): + if Y is not None: + X, Y = shuffle(X, Y) + return X[:n], Y[:n] + else: + X = shuffle(X) + return X[:n] + def fit(self, X, Y=None): if self.method == 'random': N = len(X) idx = np.random.randint(N, size=self.M) self.samples = X[idx] elif self.method == 'kmeans': + X, Y = self._subsample_data(X, Y) + print("Fitting kmeans...") t0 = datetime.now() - kmeans = KMeans(n_clusters=self.M) + kmeans = KMeans(n_clusters=len(set(Y))) kmeans.fit(X) print("Finished fitting kmeans, duration:", datetime.now() - t0) - self.samples = kmeans.cluster_centers_ + + # calculate the most ambiguous points + # we will do this by finding the distance between each point + # and all cluster centers + # and return which points have the smallest variance + dists = kmeans.transform(X) # returns an N x K matrix + variances = dists.var(axis=1) + idx = np.argsort(variances) # smallest to largest + idx = idx[:self.M] + self.samples = X[idx] + elif self.method == 'gmm': + X, Y = self._subsample_data(X, Y) + + print("Fitting GMM") + t0 = datetime.now() + gmm = GaussianMixture(n_components=len(set(Y))) + gmm.fit(X) + print("Finished fitting GMM, duration:", datetime.now() - t0) + + # calculate the most ambiguous points + probs = gmm.predict_proba(X) + ent = stats.entropy(probs.T) # N-length vector of entropies + idx = np.argsort(-ent) # negate since we want biggest first + idx = idx[:self.M] + self.samples = X[idx] return self def transform(self, X): - Z = self.gamma * X.dot(self.samples.T) # (Ntest x D) x (D x Nsamples) -> (Ntest x Nsamples) - return np.tanh(Z) + Z = X.dot(self.samples.T) # (Ntest x D) x (D x Nsamples) -> (Ntest x Nsamples) + return np.tanh(self.gamma * Z) + # return self.gamma * Z * (Z > 0) def fit_transform(self, X, Y=None): return self.fit(X, Y).transform(X) +# get the data: https://www.kaggle.com/c/digit-recognizer +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + # with 
SGD pipeline = Pipeline([ ('scaler', StandardScaler()), - ('sigmoid', SigmoidFeaturizer(gamma=0.05, n_components=2000, method='random')), + ('sigmoid', SigmoidFeaturizer(gamma=0.05, n_components=2000, method='gmm')), ('linear', SGDClassifier(max_iter=1e6, tol=1e-5)) ]) @@ -63,10 +102,16 @@ def fit_transform(self, X, Y=None): # ]) -t0 = datetime.now() -pipeline.fit(Xtrain, Ytrain) -print("train duration:", datetime.now() - t0) -t0 = datetime.now() -print("train score:", pipeline.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) -t0 = datetime.now() -print("test score:", pipeline.score(Xtest, Ytest), "duration:", datetime.now() - t0) +X = np.vstack((Xtrain, Xtest)) +Y = np.concatenate((Ytrain, Ytest)) +scores = cross_val_score(pipeline, X, Y, cv=5) +print(scores) +print("avg:", np.mean(scores)) + +# t0 = datetime.now() +# pipeline.fit(Xtrain, Ytrain) +# print("train duration:", datetime.now() - t0) +# t0 = datetime.now() +# print("train score:", pipeline.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) +# t0 = datetime.now() +# print("test score:", pipeline.score(Xtest, Ytest), "duration:", datetime.now() - t0) From 0590b9e427079f88f20ecc5e090f17c0030a9441 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 22 Jan 2019 01:38:41 -0500 Subject: [PATCH 117/329] update --- nlp_class2/neural_network2.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/nlp_class2/neural_network2.py b/nlp_class2/neural_network2.py index 1f0be410..573dd9e9 100644 --- a/nlp_class2/neural_network2.py +++ b/nlp_class2/neural_network2.py @@ -95,11 +95,14 @@ def softmax(a): # # for reference: # # original: W1 = W1 - lr * inputs.T.dot(dhidden) # VxN NxD --> VxD + # fastest way + np.subtract.at(W1, inputs, lr * dhidden) + # test this - i = 0 - for w in inputs: # don't include end token - W1[w] = W1[w] - lr * dhidden[i] - i += 1 + # i = 0 + # for w in inputs: # don't include end token + # W1[w] = W1[w] - lr * dhidden[i] + # i += 1 # vs this # oh_inputs = np.zeros((n - 1, V)) From 00427e009d6141b029cb0c2075a1ef7d52c79f63 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 23 Jan 2019 15:05:01 -0500 Subject: [PATCH 118/329] update --- linear_regression_class/l1_regularization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linear_regression_class/l1_regularization.py b/linear_regression_class/l1_regularization.py index 3bf34cc3..afadce8c 100644 --- a/linear_regression_class/l1_regularization.py +++ b/linear_regression_class/l1_regularization.py @@ -20,7 +20,7 @@ # true weights - only the first 3 dimensions of X affect Y true_w = np.array([1, 0.5, -0.5] + [0]*(D - 3)) -# generate Y - add noise with variance 0.5 +# generate Y - add noise Y = X.dot(true_w) + np.random.randn(N)*0.5 # perform gradient descent to find w From b3309e6e5cfa9c0abff2047e40b03dcaf3959e33 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 24 Jan 2019 15:05:23 -0500 Subject: [PATCH 119/329] add url --- svm_class/crossval.py | 2 ++ svm_class/fake_neural_net.py | 21 ++++++++++++++++----- svm_class/linear_svm_gradient.py | 2 ++ svm_class/rbfnetwork.py | 2 ++ svm_class/real_neural_net.py | 4 ++++ svm_class/regression.py | 2 ++ svm_class/svm_gradient.py | 2 ++ svm_class/svm_medical.py | 2 ++ svm_class/svm_mnist.py | 2 ++ svm_class/svm_smo.py | 2 ++ svm_class/svm_spam.py | 2 ++ svm_class/util.py | 2 ++ 12 files changed, 40 insertions(+), 5 deletions(-) diff --git a/svm_class/crossval.py b/svm_class/crossval.py index a740d54a..b897083a 100644 --- a/svm_class/crossval.py +++ b/svm_class/crossval.py @@ -1,3 +1,5 @@ +# 
https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future diff --git a/svm_class/fake_neural_net.py b/svm_class/fake_neural_net.py index 6f6a8419..591e302f 100644 --- a/svm_class/fake_neural_net.py +++ b/svm_class/fake_neural_net.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future @@ -17,13 +19,14 @@ from sklearn.model_selection import cross_val_score from sklearn.utils import shuffle from scipy import stats +from sklearn.linear_model import LogisticRegression class SigmoidFeaturizer: def __init__(self, gamma=1.0, n_components=100, method='random'): self.M = n_components self.gamma = gamma - assert(method in ('random', 'kmeans', 'gmm')) + assert(method in ('normal', 'random', 'kmeans', 'gmm')) self.method = method def _subsample_data(self, X, Y, n=10000): @@ -39,6 +42,10 @@ def fit(self, X, Y=None): N = len(X) idx = np.random.randint(N, size=self.M) self.samples = X[idx] + elif self.method == 'normal': + # just sample from N(0,1) + D = X.shape[1] + self.samples = np.random.randn(self.M, D) / np.sqrt(D) elif self.method == 'kmeans': X, Y = self._subsample_data(X, Y) @@ -62,7 +69,10 @@ def fit(self, X, Y=None): print("Fitting GMM") t0 = datetime.now() - gmm = GaussianMixture(n_components=len(set(Y))) + gmm = GaussianMixture( + n_components=len(set(Y)), + covariance_type='spherical', + reg_covar=1e-6) gmm.fit(X) print("Finished fitting GMM, duration:", datetime.now() - t0) @@ -89,8 +99,9 @@ def fit_transform(self, X, Y=None): # with SGD pipeline = Pipeline([ ('scaler', StandardScaler()), - ('sigmoid', SigmoidFeaturizer(gamma=0.05, n_components=2000, method='gmm')), - ('linear', SGDClassifier(max_iter=1e6, tol=1e-5)) + ('sigmoid', SigmoidFeaturizer(gamma=0.05, n_components=2000, method='normal')), + # ('linear', SGDClassifier(max_iter=1e6, tol=1e-5)) + ('linear', LogisticRegression()) # takes longer ]) # with Linear SVC @@ -101,7 +112,7 @@ def fit_transform(self, X, Y=None): # ('linear', LinearSVC()) # ]) - +# let's do some cross-validation instead, why not X = np.vstack((Xtrain, Xtest)) Y = np.concatenate((Ytrain, Ytest)) scores = cross_val_score(pipeline, X, Y, cv=5) diff --git a/svm_class/linear_svm_gradient.py b/svm_class/linear_svm_gradient.py index 6fcccae3..a5b4a927 100644 --- a/svm_class/linear_svm_gradient.py +++ b/svm_class/linear_svm_gradient.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from future.utils import iteritems from builtins import range, input diff --git a/svm_class/rbfnetwork.py b/svm_class/rbfnetwork.py index e847666f..1b7c5bd7 100644 --- a/svm_class/rbfnetwork.py +++ b/svm_class/rbfnetwork.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future diff --git a/svm_class/real_neural_net.py b/svm_class/real_neural_net.py index 2fef89ab..215f9d4b 100644 --- a/svm_class/real_neural_net.py +++ 
b/svm_class/real_neural_net.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future @@ -10,6 +12,7 @@ from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.neural_network import MLPClassifier +from sklearn.linear_model import LogisticRegression # get the data: https://www.kaggle.com/c/digit-recognizer Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() @@ -18,6 +21,7 @@ pipeline = Pipeline([ # ('scaler', StandardScaler()), ('mlp', MLPClassifier(hidden_layer_sizes=(500,), activation='tanh')), + # ('lr', LogisticRegression()), ]) diff --git a/svm_class/regression.py b/svm_class/regression.py index 1fb7b4b2..4e1770f3 100644 --- a/svm_class/regression.py +++ b/svm_class/regression.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future diff --git a/svm_class/svm_gradient.py b/svm_class/svm_gradient.py index 65c9b2c1..1a978363 100644 --- a/svm_class/svm_gradient.py +++ b/svm_class/svm_gradient.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from future.utils import iteritems from builtins import range, input diff --git a/svm_class/svm_medical.py b/svm_class/svm_medical.py index bee52069..f9342760 100644 --- a/svm_class/svm_medical.py +++ b/svm_class/svm_medical.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from future.utils import iteritems from builtins import range, input diff --git a/svm_class/svm_mnist.py b/svm_class/svm_mnist.py index 663b5847..9a71a650 100644 --- a/svm_class/svm_mnist.py +++ b/svm_class/svm_mnist.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future diff --git a/svm_class/svm_smo.py b/svm_class/svm_smo.py index 6ab5c372..b7227268 100644 --- a/svm_class/svm_smo.py +++ b/svm_class/svm_smo.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from future.utils import iteritems from builtins import range, input diff --git a/svm_class/svm_spam.py b/svm_class/svm_spam.py index af6086c1..78d3f106 100644 --- a/svm_class/svm_spam.py +++ b/svm_class/svm_spam.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future diff --git a/svm_class/util.py b/svm_class/util.py index a40b557b..04e8003b 100644 --- a/svm_class/util.py +++ b/svm_class/util.py @@ -1,3 +1,5 @@ +# https://deeplearningcourses.com/c/support-vector-machines-in-python +# 
https://www.udemy.com/support-vector-machines-in-python from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future From 273b0db34122a49138f33cf1458eb9ec30c05a5b Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 25 Jan 2019 15:47:10 -0500 Subject: [PATCH 120/329] update --- svm_class/svm_smo.py | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/svm_class/svm_smo.py b/svm_class/svm_smo.py index b7227268..7b1a8873 100644 --- a/svm_class/svm_smo.py +++ b/svm_class/svm_smo.py @@ -36,10 +36,9 @@ def sigmoid(X1, X2, gamma=0.05, c=1): class SVM: - def __init__(self, kernel, C=1.0, right=True): + def __init__(self, kernel, C=1.0): self.kernel = kernel self.C = C - self.right = right def _loss(self, X, Y): # return -np.sum(self.alphas) + \ @@ -247,10 +246,7 @@ def fit(self, X, Y, tol=0.00001, eps=0.01): plt.show() def _decision_function(self, X): - if self.right: - return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X)) - self.b - else: - return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X) - self.b) + return (self.alphas * self.Ytrain).dot(self.kernel(self.Xtrain, X)) - self.b def predict(self, X): @@ -293,17 +289,15 @@ def get_data(): Xtest = scaler.transform(Xtest) # now we'll use our custom implementation - for right in (True,): - print("Right:", right) - model = SVM(kernel=linear, right=right) - - t0 = datetime.now() - model.fit(Xtrain, Ytrain) - print("train duration:", datetime.now() - t0) - t0 = datetime.now() - print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) - t0 = datetime.now() - print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) + model = SVM(kernel=linear) + + t0 = datetime.now() + model.fit(Xtrain, Ytrain) + print("train duration:", datetime.now() - t0) + t0 = datetime.now() + print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0) + t0 = datetime.now() + print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0) if Xtrain.shape[1] == 2: plot_decision_boundary(model) From be04d8cc2d1f2e1a90d3112ecf026dea4aaef512 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 26 Jan 2019 19:52:15 -0500 Subject: [PATCH 121/329] add gaussian nb unsupervised deep learning --- unsupervised_class2/gaussian_nb.py | 62 ++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 unsupervised_class2/gaussian_nb.py diff --git a/unsupervised_class2/gaussian_nb.py b/unsupervised_class2/gaussian_nb.py new file mode 100644 index 00000000..e4a1601f --- /dev/null +++ b/unsupervised_class2/gaussian_nb.py @@ -0,0 +1,62 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from sklearn.decomposition import PCA +# from sklearn.naive_bayes import GaussianNB # doesn't have smoothing +from scipy.stats import norm +from scipy.stats import multivariate_normal as mvn +from util import getKaggleMNIST + + +class GaussianNB(object): + def fit(self, X, Y, smoothing=1e-2): + self.gaussians = dict() + self.priors = dict() + labels = set(Y) + for c in labels: + current_x = X[Y == c] + self.gaussians[c] = { + 'mean': 
current_x.mean(axis=0), + 'var': current_x.var(axis=0) + smoothing, + } + self.priors[c] = float(len(Y[Y == c])) / len(Y) + + def score(self, X, Y): + P = self.predict(X) + return np.mean(P == Y) + + def predict(self, X): + N, D = X.shape + K = len(self.gaussians) + P = np.zeros((N, K)) + for c, g in iteritems(self.gaussians): + mean, var = g['mean'], g['var'] + P[:,c] = mvn.logpdf(X, mean=mean, cov=var) + np.log(self.priors[c]) + return np.argmax(P, axis=1) + + +# get data +Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST() + +# try NB by itself +model1 = GaussianNB() +model1.fit(Xtrain, Ytrain) +print("NB train score:", model1.score(Xtrain, Ytrain)) +print("NB test score:", model1.score(Xtest, Ytest)) + +# try NB with PCA first +pca = PCA(n_components=50) +Ztrain = pca.fit_transform(Xtrain) +Ztest = pca.transform(Xtest) + +model2 = GaussianNB() +model2.fit(Ztrain, Ytrain) +print("NB+PCA train score:", model2.score(Ztrain, Ytrain)) +print("NB+PCA test score:", model2.score(Ztest, Ytest)) From 365e9e96dba6b4f0e23d94b23ab38b8744a9e64d Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 26 Jan 2019 19:53:03 -0500 Subject: [PATCH 122/329] compare pca svd --- unsupervised_class2/compare_pca_svd.py | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 unsupervised_class2/compare_pca_svd.py diff --git a/unsupervised_class2/compare_pca_svd.py b/unsupervised_class2/compare_pca_svd.py new file mode 100644 index 00000000..8d08e38d --- /dev/null +++ b/unsupervised_class2/compare_pca_svd.py @@ -0,0 +1,31 @@ +# https://deeplearningcourses.com/c/unsupervised-deep-learning-in-python +# https://www.udemy.com/unsupervised-deep-learning-in-python +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +from sklearn.decomposition import PCA, TruncatedSVD +from util import getKaggleMNIST + + +X, Y, _, _ = getKaggleMNIST() +m = X.mean(axis=0) +s = X.std(axis=0) +np.place(s, s == 0, 1) +X = (X - m) / s + +pca = PCA() +svd = TruncatedSVD() + +Z1 = pca.fit_transform(X) +Z2 = svd.fit_transform(X) + +plt.subplot(1,2,1) +plt.scatter(Z1[:,0], Z1[:,1], c=Y) +plt.subplot(1,2,2) +plt.scatter(Z2[:,0], Z2[:,1], c=Y) +plt.show() From f2438a1a9d03cd9be430fa7da6d8e081c60a1612 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 27 Jan 2019 14:04:51 -0500 Subject: [PATCH 123/329] add sk_mlp --- unsupervised_class2/sk_mlp.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 unsupervised_class2/sk_mlp.py diff --git a/unsupervised_class2/sk_mlp.py b/unsupervised_class2/sk_mlp.py new file mode 100644 index 00000000..f2e1d393 --- /dev/null +++ b/unsupervised_class2/sk_mlp.py @@ -0,0 +1,33 @@ +# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow +# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from sklearn.neural_network import MLPRegressor +from util import getKaggleMNIST + + + +# get data +X, _, Xt, _ = getKaggleMNIST() + +# create the model and train it +model = MLPRegressor() +model.fit(X, X) + +# test the model +print("Train R^2:", model.score(X, X)) +print("Test R^2:", model.score(Xt, Xt)) + +Xhat = model.predict(X) +mse = 
((Xhat - X)**2).mean() +print("Train MSE:", mse) + +Xhat = model.predict(Xt) +mse = ((Xhat - Xt)**2).mean() +print("Test MSE:", mse) \ No newline at end of file From 6472fd9fee080cfe9aa5307764e19c96a7fe2750 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 4 Feb 2019 02:28:21 -0500 Subject: [PATCH 124/329] gradient descent exercise --- linear_regression_class/gd.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 linear_regression_class/gd.py diff --git a/linear_regression_class/gd.py b/linear_regression_class/gd.py new file mode 100644 index 00000000..04b36c1c --- /dev/null +++ b/linear_regression_class/gd.py @@ -0,0 +1,25 @@ +import matplotlib.pyplot as plt + +lr = 1e-2 +x1 = 5 +x2 = -5 + +def J(x1, x2): + return x1**2 + x2**4 + +def g1(x1): + return 2*x1 + +def g2(x2): + return 4*x2**3 + +values = [] +for i in range(1000): + values.append(J(x1, x2)) + x1 -= lr * g1(x1) + x2 -= lr * g2(x2) +values.append(J(x1, x2)) + +print(x1, x2) +plt.plot(values) +plt.show() \ No newline at end of file From 741152679b42fcfbfa0ff4c3098b210df9fb251d Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 7 Feb 2019 15:14:02 -0500 Subject: [PATCH 125/329] update --- unsupervised_class2/extra_reading.txt | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 unsupervised_class2/extra_reading.txt diff --git a/unsupervised_class2/extra_reading.txt b/unsupervised_class2/extra_reading.txt new file mode 100644 index 00000000..24c20d3e --- /dev/null +++ b/unsupervised_class2/extra_reading.txt @@ -0,0 +1,32 @@ +Visualizing Data using t-SNE +http://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf + +Reducing the Dimensionality of Data with Neural Networks +https://www.cs.toronto.edu/~hinton/science.pdf + +A fast learning algorithm for deep belief nets +https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf + +Why Does Unsupervised Pre-training Help Deep Learning? 
+http://www.jmlr.org/papers/volume11/erhan10a/erhan10a.pdf + +A Better Way to Pretrain Deep Boltzmann Machines +http://www.cs.toronto.edu/~hinton/absps/DBM_pretrain.pdf + +On Deep Generative Models with Applications to Recognition +http://www.cs.toronto.edu/~hinton/absps/ranzato_cvpr2011.pdf + +LEARNING A BETTER REPRESENTATION OF SPEECH SOUND WAVES USING RESTRICTED BOLTZMANN MACHINES +http://www.cs.toronto.edu/~hinton/absps/jaitly_ICASSP2011.pdf + +Rectified Linear Units Improve Restricted Boltzmann Machines +http://www.cs.toronto.edu/~hinton/absps/reluICML.pdf + +Generative versus discriminative training of RBMs for classification of fMRI images +http://www.cs.toronto.edu/~hinton/absps/fmrinips.pdf + +Restricted Boltzmann Machines for Collaborative Filtering +http://www.cs.toronto.edu/~hinton/absps/netflix.pdf + +On Contrastive Divergence Learning +http://www.cs.toronto.edu/~hinton/absps/cdmiguel.pdf \ No newline at end of file From 81300d646009c16e2b4e370aecbd52ef116f7e92 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 8 Feb 2019 22:36:27 -0500 Subject: [PATCH 126/329] update --- rl2/atari/dqn_tf.py | 225 ++++++++++++++++------ rl2/atari/dqn_tf_alt.py | 399 ---------------------------------------- rl2/atari/dqn_theano.py | 226 ++++++++++++++++------- 3 files changed, 331 insertions(+), 519 deletions(-) delete mode 100644 rl2/atari/dqn_tf_alt.py diff --git a/rl2/atari/dqn_tf.py b/rl2/atari/dqn_tf.py index 64acbfb6..3894fda9 100644 --- a/rl2/atari/dqn_tf.py +++ b/rl2/atari/dqn_tf.py @@ -29,27 +29,119 @@ MAX_EXPERIENCES = 500000 MIN_EXPERIENCES = 50000 TARGET_UPDATE_PERIOD = 10000 -IM_SIZE = 80 +IM_SIZE = 84 K = 4 #env.action_space.n - - -def downsample_image(A): - B = A[31:195] # select the important parts of the image - B = B.mean(axis=2) # convert to grayscale - - # downsample image - # changing aspect ratio doesn't significantly distort the image - # nearest neighbor interpolation produces a much sharper image - # than default bilinear - B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest') - return B - - -def update_state(state, obs): - obs_small = downsample_image(obs) - return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) +# Transform raw images for input into neural network +# 1) Convert to grayscale +# 2) Resize +# 3) Crop +class ImageTransformer: + def __init__(self): + with tf.variable_scope("image_transformer"): + self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8) + self.output = tf.image.rgb_to_grayscale(self.input_state) + self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, 160, 160) + self.output = tf.image.resize_images( + self.output, + [IM_SIZE, IM_SIZE], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) + self.output = tf.squeeze(self.output) + + def transform(self, state, sess=None): + sess = sess or tf.get_default_session() + return sess.run(self.output, { self.input_state: state }) + + +def update_state(state, obs_small): + return np.append(state[:,:,1:], np.expand_dims(obs_small, 2), axis=2) + + + +class ReplayMemory: + def __init__(self, size=500000, frame_height=IM_SIZE, frame_width=IM_SIZE, + agent_history_length=4, batch_size=32): + """ + Args: + size: Integer, Number of stored transitions + frame_height: Integer, Height of a frame of an Atari game + frame_width: Integer, Width of a frame of an Atari game + agent_history_length: Integer, Number of frames stacked together to create a state + batch_size: Integer, Number of transitions returned in a minibatch + """ + self.size = size + self.frame_height = frame_height + 
self.frame_width = frame_width + self.agent_history_length = agent_history_length + self.batch_size = batch_size + self.count = 0 + self.current = 0 + + # Pre-allocate memory + self.actions = np.empty(self.size, dtype=np.int32) + self.rewards = np.empty(self.size, dtype=np.float32) + self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8) + self.terminal_flags = np.empty(self.size, dtype=np.bool) + + # Pre-allocate memory for the states and new_states in a minibatch + self.states = np.empty((self.batch_size, self.agent_history_length, + self.frame_height, self.frame_width), dtype=np.uint8) + self.new_states = np.empty((self.batch_size, self.agent_history_length, + self.frame_height, self.frame_width), dtype=np.uint8) + self.indices = np.empty(self.batch_size, dtype=np.int32) + + def add_experience(self, action, frame, reward, terminal): + """ + Args: + action: An integer-encoded action + frame: One grayscale frame of the game + reward: reward the agend received for performing an action + terminal: A bool stating whether the episode terminated + """ + if frame.shape != (self.frame_height, self.frame_width): + raise ValueError('Dimension of frame is wrong!') + self.actions[self.current] = action + self.frames[self.current, ...] = frame + self.rewards[self.current] = reward + self.terminal_flags[self.current] = terminal + self.count = max(self.count, self.current+1) + self.current = (self.current + 1) % self.size + + def _get_state(self, index): + if self.count is 0: + raise ValueError("The replay memory is empty!") + if index < self.agent_history_length - 1: + raise ValueError("Index must be min 3") + return self.frames[index-self.agent_history_length+1:index+1, ...] + + def _get_valid_indices(self): + for i in range(self.batch_size): + while True: + index = random.randint(self.agent_history_length, self.count - 1) + if index < self.agent_history_length: + continue + if index >= self.current and index - self.agent_history_length <= self.current: + continue + if self.terminal_flags[index - self.agent_history_length:index].any(): + continue + break + self.indices[i] = index + + def get_minibatch(self): + """ + Returns a minibatch of self.batch_size transitions + """ + if self.count < self.agent_history_length: + raise ValueError('Not enough memories to get a minibatch') + + self._get_valid_indices() + + for i, idx in enumerate(self.indices): + self.states[i] = self._get_state(idx - 1) + self.new_states[i] = self._get_state(idx) + + return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices] class DQN: @@ -61,11 +153,11 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope): with tf.variable_scope(scope): # inputs and targets - self.X = tf.placeholder(tf.float32, shape=(None, 4, IM_SIZE, IM_SIZE), name='X') + self.X = tf.placeholder(tf.float32, shape=(None, IM_SIZE, IM_SIZE, 4), name='X') # tensorflow convolution needs the order to be: # (num_samples, height, width, "color") - # so we need to tranpose later + self.G = tf.placeholder(tf.float32, shape=(None,), name='G') self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions') @@ -74,7 +166,6 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope): # these built-in layers are faster and don't require us to # calculate the size of the output of the final conv layer! 
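        # note: ReplayMemory hands back uint8 frames, so dividing by 255 below
        # rescales pixels to [0, 1] before the conv stack.
        # rough size check (assuming the conv_layer_sizes used in main and the
        # default 'SAME' padding of tf.contrib.layers.conv2d):
        #   84 -> ceil(84/4) = 21 -> ceil(21/2) = 11 -> ceil(11/1) = 11,
        # so the flattened conv output is 11 * 11 * 64 = 7744 units.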
Z = self.X / 255.0 - Z = tf.transpose(Z, [0, 2, 3, 1]) for num_output_filters, filtersz, poolsz in conv_layer_sizes: Z = tf.contrib.layers.conv2d( Z, @@ -97,16 +188,18 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope): reduction_indices=[1] ) - cost = tf.reduce_mean(tf.square(self.G - selected_action_values)) - # self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost) + # cost = tf.reduce_mean(tf.square(self.G - selected_action_values)) + cost = tf.reduce_mean(tf.losses.huber_loss(self.G, selected_action_values)) + self.train_op = tf.train.AdamOptimizer(1e-5).minimize(cost) # self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost) # self.train_op = tf.train.RMSPropOptimizer(2.5e-4, decay=0.99, epsilon=1e-3).minimize(cost) - self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost) + # self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost) # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost) # self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost) self.cost = cost + def copy_from(self, other): mine = [t for t in tf.trainable_variables() if t.name.startswith(self.scope)] mine = sorted(mine, key=lambda v: v.name) @@ -115,12 +208,26 @@ def copy_from(self, other): ops = [] for p, q in zip(mine, theirs): - actual = self.session.run(q) - op = p.assign(actual) + op = p.assign(q) ops.append(op) + self.session.run(ops) + + + def save(self): + params = [t for t in tf.trainable_variables() if t.name.startswith(self.scope)] + params = self.session.run(params) + np.savez('tf_dqn_weights.npz', *params) + + def load(self): + params = [t for t in tf.trainable_variables() if t.name.startswith(self.scope)] + npz = np.load('tf_dqn_weights.npz') + ops = [] + for p, (_, v) in zip(params, npz.iteritems()): + ops.append(p.assign(v)) self.session.run(ops) + def set_session(self, session): self.session = session @@ -147,8 +254,7 @@ def sample_action(self, x, eps): def learn(model, target_model, experience_replay_buffer, gamma, batch_size): # Sample experiences - samples = random.sample(experience_replay_buffer, batch_size) - states, actions, rewards, next_states, dones = map(np.array, zip(*samples)) + states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch() # Calculate targets next_Qs = target_model.predict(next_states) @@ -162,10 +268,12 @@ def learn(model, target_model, experience_replay_buffer, gamma, batch_size): def play_one( env, + sess, total_t, experience_replay_buffer, model, target_model, + image_transformer, gamma, batch_size, epsilon, @@ -176,9 +284,8 @@ def play_one( # Reset the environment obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - assert(state.shape == (4, 80, 80)) + obs_small = image_transformer.transform(obs, sess) + state = np.stack([obs_small] * 4, axis=2) loss = None @@ -198,26 +305,21 @@ def play_one( # Take action action = model.sample_action(state, epsilon) obs, reward, done, _ = env.step(action) - obs_small = downsample_image(obs) - next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) - # assert(state.shape == (4, 80, 80)) - - + obs_small = image_transformer.transform(obs, sess) + next_state = update_state(state, obs_small) + # Compute total reward episode_reward += reward - # Remove oldest experience if replay buffer is full - if len(experience_replay_buffer) == MAX_EXPERIENCES: - experience_replay_buffer.pop(0) - # Save the latest experience - 
experience_replay_buffer.append((state, action, reward, next_state, done)) + experience_replay_buffer.add_experience(action, obs_small, reward, done) # Train the model, keep track of time t0_2 = datetime.now() loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size) dt = datetime.now() - t0_2 + # More debugging info total_time_training += dt.total_seconds() num_steps_in_episode += 1 @@ -230,6 +332,15 @@ def play_one( return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + if __name__ == '__main__': @@ -238,9 +349,9 @@ def play_one( hidden_layer_sizes = [512] gamma = 0.99 batch_sz = 32 - num_episodes = 10000 + num_episodes = 3500 total_t = 0 - experience_replay_buffer = [] + experience_replay_buffer = ReplayMemory() episode_rewards = np.zeros(num_episodes) @@ -272,6 +383,7 @@ def play_one( gamma=gamma, scope="target_model" ) + image_transformer = ImageTransformer() @@ -283,35 +395,30 @@ def play_one( print("Populating experience replay buffer...") obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - # assert(state.shape == (4, 80, 80)) + for i in range(MIN_EXPERIENCES): action = np.random.choice(K) obs, reward, done, _ = env.step(action) - next_state = update_state(state, obs) - # assert(state.shape == (4, 80, 80)) - experience_replay_buffer.append((state, action, reward, next_state, done)) + obs_small = image_transformer.transform(obs, sess) # not used anymore + experience_replay_buffer.add_experience(action, obs_small, reward, done) if done: obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - # assert(state.shape == (4, 80, 80)) - else: - state = next_state # Play a number of episodes and learn! 
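  # each call to play_one() below runs one full episode: epsilon keeps
  # annealing by epsilon_change per environment step (down to epsilon_min),
  # the target network is refreshed every TARGET_UPDATE_PERIOD steps, and one
  # minibatch update is done per step; the reported time_per_step measures
  # only the time spent inside learn().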
+ t0 = datetime.now() for i in range(num_episodes): total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one( env, + sess, total_t, experience_replay_buffer, model, target_model, + image_transformer, gamma, batch_sz, epsilon, @@ -330,5 +437,15 @@ def play_one( "Epsilon:", "%.3f" % epsilon ) sys.stdout.flush() + print("Total duration:", datetime.now() - t0) + + model.save() + + # Plot the smoothed returns + y = smooth(episode_rewards) + plt.plot(episode_rewards, label='orig') + plt.plot(y, label='smoothed') + plt.legend() + plt.show() diff --git a/rl2/atari/dqn_tf_alt.py b/rl2/atari/dqn_tf_alt.py deleted file mode 100644 index aaf3e4ef..00000000 --- a/rl2/atari/dqn_tf_alt.py +++ /dev/null @@ -1,399 +0,0 @@ -# https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python -# https://www.udemy.com/deep-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - -import copy -import gym -import os -import sys -import random -import numpy as np -import tensorflow as tf -import matplotlib.pyplot as plt -from gym import wrappers -from datetime import datetime -from scipy.misc import imresize - - - - - -##### testing only -# MAX_EXPERIENCES = 10000 -# MIN_EXPERIENCES = 1000 - - -MAX_EXPERIENCES = 500000 -MIN_EXPERIENCES = 50000 -TARGET_UPDATE_PERIOD = 10000 -IM_SIZE = 80 -K = 4 #env.action_space.n - - - - -def downsample_image(A): - B = A[31:195] # select the important parts of the image - B = B.mean(axis=2) # convert to grayscale - - # downsample image - # changing aspect ratio doesn't significantly distort the image - # nearest neighbor interpolation produces a much sharper image - # than default bilinear - B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest') - return B - - -def update_state(state, obs): - obs_small = downsample_image(obs) - return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) - - - - - -class ConvLayer: - def __init__(self, mi, mo, filtersz=5, stride=2, f=tf.nn.relu): - # mi = input feature map size - # mo = output feature map size - self.W = tf.Variable(tf.random_normal(shape=(filtersz, filtersz, mi, mo))) - b0 = np.zeros(mo, dtype=np.float32) - self.b = tf.Variable(b0) - self.f = f - self.stride = stride - self.params = [self.W, self.b] - - def forward(self, X): - conv_out = tf.nn.conv2d(X, self.W, strides=[1, self.stride, self.stride, 1], padding='SAME') - conv_out = tf.nn.bias_add(conv_out, self.b) - return self.f(conv_out) - - -class HiddenLayer: - def __init__(self, M1, M2, f=tf.nn.relu, use_bias=True): - # print("M1:", M1) - self.W = tf.Variable(tf.random_normal(shape=(M1, M2))) - self.params = [self.W] - self.use_bias = use_bias - if use_bias: - self.b = tf.Variable(np.zeros(M2).astype(np.float32)) - self.params.append(self.b) - self.f = f - - def forward(self, X): - if self.use_bias: - a = tf.matmul(X, self.W) + self.b - else: - a = tf.matmul(X, self.W) - return self.f(a) - - -class DQN: - # def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, max_experiences=500000, min_experiences=50000, batch_sz=32): - def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): - self.K = K - - # create the graph - self.conv_layers = [] - num_input_filters = 4 # number of filters / color channels - final_height = IM_SIZE - final_width = IM_SIZE - for num_output_filters, filtersz, stride in conv_layer_sizes: - layer = ConvLayer(num_input_filters, num_output_filters, 
filtersz, stride) - self.conv_layers.append(layer) - num_input_filters = num_output_filters - - # calculate final output size for input into fully connected layers - old_height = final_height - new_height = int(np.ceil(old_height / stride)) - print("new_height (%s) = old_height (%s) / stride (%s)" % (new_height, old_height, stride)) - final_height = int(np.ceil(final_height / stride)) - final_width = int(np.ceil(final_width / stride)) - - self.layers = [] - flattened_ouput_size = final_height * final_width * num_input_filters - M1 = flattened_ouput_size - for M2 in hidden_layer_sizes: - layer = HiddenLayer(M1, M2) - self.layers.append(layer) - M1 = M2 - - # final layer - layer = HiddenLayer(M1, K, lambda x: x) - self.layers.append(layer) - - # collect params for copy - self.params = [] - for layer in (self.conv_layers + self.layers): - self.params += layer.params - - # inputs and targets - self.X = tf.placeholder(tf.float32, shape=(None, 4, IM_SIZE, IM_SIZE), name='X') - # tensorflow convolution needs the order to be: - # (num_samples, height, width, "color") - # so we need to tranpose later - self.G = tf.placeholder(tf.float32, shape=(None,), name='G') - self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions') - - # calculate output and cost - Z = self.X / 255.0 - Z = tf.transpose(Z, [0, 2, 3, 1]) # TF wants the "color" channel to be last - for layer in self.conv_layers: - Z = layer.forward(Z) - Z = tf.reshape(Z, [-1, flattened_ouput_size]) - for layer in self.layers: - Z = layer.forward(Z) - Y_hat = Z - self.predict_op = Y_hat - - # selected_action_values = tf.reduce_sum( - # Y_hat * tf.one_hot(self.actions, K), - # reduction_indices=[1] - # ) - - # we would like to do this, but it doesn't work in TF: - # selected_action_values = Y_hat[tf.range(batch_sz), self.actions] - # instead we do: - indices = tf.range(batch_sz) * tf.shape(Y_hat)[1] + self.actions - selected_action_values = tf.gather( - tf.reshape(Y_hat, [-1]), # flatten - indices - ) - - cost = tf.reduce_mean(tf.square(self.G - selected_action_values)) - self.cost = cost - # self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost) - # self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost) - self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost) - # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost) - # self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost) - - def set_session(self, session): - self.session = session - - def copy_from(self, other): - # collect all the ops - ops = [] - my_params = self.params - other_params = other.params - for p, q in zip(my_params, other_params): - actual = self.session.run(q) - op = p.assign(actual) - ops.append(op) - # now run them all - self.session.run(ops) - - def predict(self, X): - return self.session.run(self.predict_op, feed_dict={self.X: X}) - - def update(self, states, actions, targets): - c, _ = self.session.run( - [self.cost, self.train_op], - feed_dict={ - self.X: states, - self.G: targets, - self.actions: actions - } - ) - return c - - def sample_action(self, x, eps): - if np.random.random() < eps: - return np.random.choice(self.K) - else: - return np.argmax(self.predict([x])[0]) - - - - - -def learn(model, target_model, experience_replay_buffer, gamma, batch_size): - # Sample experiences - samples = random.sample(experience_replay_buffer, batch_size) - states, actions, rewards, next_states, dones = map(np.array, zip(*samples)) - - # Calculate targets - next_Qs = 
target_model.predict(next_states) - next_Q = np.amax(next_Qs, axis=1) - targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q - - # Update model - loss = model.update(states, actions, targets) - return loss - - -def play_one( - env, - total_t, - experience_replay_buffer, - model, - target_model, - gamma, - batch_size, - epsilon, - epsilon_change, - epsilon_min): - - t0 = datetime.now() - - # Reset the environment - obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - assert(state.shape == (4, 80, 80)) - loss = None - - - total_time_training = 0 - num_steps_in_episode = 0 - episode_reward = 0 - - done = False - while not done: - - # Update target network - if total_t % TARGET_UPDATE_PERIOD == 0: - target_model.copy_from(model) - print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD)) - - - # Take action - action = model.sample_action(state, epsilon) - obs, reward, done, _ = env.step(action) - obs_small = downsample_image(obs) - next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) - # assert(state.shape == (4, 80, 80)) - - - - episode_reward += reward - - # Remove oldest experience if replay buffer is full - if len(experience_replay_buffer) == MAX_EXPERIENCES: - experience_replay_buffer.pop(0) - - # Save the latest experience - experience_replay_buffer.append((state, action, reward, next_state, done)) - - # Train the model, keep track of time - t0_2 = datetime.now() - loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size) - dt = datetime.now() - t0_2 - - total_time_training += dt.total_seconds() - num_steps_in_episode += 1 - - - state = next_state - total_t += 1 - - epsilon = max(epsilon - epsilon_change, epsilon_min) - - return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon - - - -if __name__ == '__main__': - - # hyperparams and initialize stuff - conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)] - hidden_layer_sizes = [512] - gamma = 0.99 - batch_sz = 32 - num_episodes = 10000 - total_t = 0 - experience_replay_buffer = [] - episode_rewards = np.zeros(num_episodes) - - - - # epsilon - # decays linearly until 0.1 - epsilon = 1.0 - epsilon_min = 0.1 - epsilon_change = (epsilon - epsilon_min) / 500000 - - - - # Create environment - env = gym.envs.make("Breakout-v0") - - - - # Create models - model = DQN( - K=K, - conv_layer_sizes=conv_layer_sizes, - hidden_layer_sizes=hidden_layer_sizes, - gamma=gamma, - # scope="model" - ) - target_model = DQN( - K=K, - conv_layer_sizes=conv_layer_sizes, - hidden_layer_sizes=hidden_layer_sizes, - gamma=gamma, - # scope="target_model" - ) - - - - with tf.Session() as sess: - model.set_session(sess) - target_model.set_session(sess) - sess.run(tf.global_variables_initializer()) - - - print("Populating experience replay buffer...") - obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - # assert(state.shape == (4, 80, 80)) - for i in range(MIN_EXPERIENCES): - - action = np.random.choice(K) - obs, reward, done, _ = env.step(action) - next_state = update_state(state, obs) - # assert(state.shape == (4, 80, 80)) - experience_replay_buffer.append((state, action, reward, next_state, done)) - - if done: - obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - # assert(state.shape == (4, 80, 80)) - else: - state = next_state - - - # Play a 
number of episodes and learn! - for i in range(num_episodes): - - total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one( - env, - total_t, - experience_replay_buffer, - model, - target_model, - gamma, - batch_sz, - epsilon, - epsilon_change, - epsilon_min, - ) - episode_rewards[i] = episode_reward - - last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean() - print("Episode:", i, - "Duration:", duration, - "Num steps:", num_steps_in_episode, - "Reward:", episode_reward, - "Training time per step:", "%.3f" % time_per_step, - "Avg Reward (Last 100):", "%.3f" % last_100_avg, - "Epsilon:", "%.3f" % epsilon - ) - sys.stdout.flush() - diff --git a/rl2/atari/dqn_theano.py b/rl2/atari/dqn_theano.py index a301cebd..f04c1772 100644 --- a/rl2/atari/dqn_theano.py +++ b/rl2/atari/dqn_theano.py @@ -22,7 +22,6 @@ - ##### testing only # MAX_EXPERIENCES = 10000 # MIN_EXPERIENCES = 1000 @@ -31,16 +30,21 @@ MAX_EXPERIENCES = 500000 MIN_EXPERIENCES = 50000 TARGET_UPDATE_PERIOD = 10000 -IM_SIZE = 80 +IM_SIZE = 84 K = 4 #env.action_space.n +def rgb2gray(rgb): + r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2] + gray = 0.2989 * r + 0.5870 * g + 0.1140 * b + return gray.astype(np.uint8) +# TODO: can this be converted into a Theano function? def downsample_image(A): - B = A[31:195] # select the important parts of the image - B = B.mean(axis=2) # convert to grayscale - + B = A[34:194] # select the important parts of the image + B = rgb2gray(B) # convert to grayscale + # downsample image # changing aspect ratio doesn't significantly distort the image # nearest neighbor interpolation produces a much sharper image @@ -54,7 +58,89 @@ def update_state(state, obs): return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) - +class ReplayMemory: + def __init__(self, size=500000, frame_height=IM_SIZE, frame_width=IM_SIZE, + agent_history_length=4, batch_size=32): + """ + Args: + size: Integer, Number of stored transitions + frame_height: Integer, Height of a frame of an Atari game + frame_width: Integer, Width of a frame of an Atari game + agent_history_length: Integer, Number of frames stacked together to create a state + batch_size: Integer, Number of transitions returned in a minibatch + """ + self.size = size + self.frame_height = frame_height + self.frame_width = frame_width + self.agent_history_length = agent_history_length + self.batch_size = batch_size + self.count = 0 + self.current = 0 + + # Pre-allocate memory + self.actions = np.empty(self.size, dtype=np.int32) + self.rewards = np.empty(self.size, dtype=np.float32) + self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8) + self.terminal_flags = np.empty(self.size, dtype=np.bool) + + # Pre-allocate memory for the states and new_states in a minibatch + self.states = np.empty((self.batch_size, self.agent_history_length, + self.frame_height, self.frame_width), dtype=np.uint8) + self.new_states = np.empty((self.batch_size, self.agent_history_length, + self.frame_height, self.frame_width), dtype=np.uint8) + self.indices = np.empty(self.batch_size, dtype=np.int32) + + def add_experience(self, action, frame, reward, terminal): + """ + Args: + action: An integer-encoded action + frame: One grayscale frame of the game + reward: reward the agend received for performing an action + terminal: A bool stating whether the episode terminated + """ + if frame.shape != (self.frame_height, self.frame_width): + raise ValueError('Dimension of frame is wrong!') + self.actions[self.current] = action + 
self.frames[self.current, ...] = frame + self.rewards[self.current] = reward + self.terminal_flags[self.current] = terminal + self.count = max(self.count, self.current+1) + self.current = (self.current + 1) % self.size + + def _get_state(self, index): + if self.count is 0: + raise ValueError("The replay memory is empty!") + if index < self.agent_history_length - 1: + raise ValueError("Index must be min 3") + return self.frames[index-self.agent_history_length+1:index+1, ...] + + def _get_valid_indices(self): + for i in range(self.batch_size): + while True: + index = random.randint(self.agent_history_length, self.count - 1) + if index < self.agent_history_length: + continue + if index >= self.current and index - self.agent_history_length <= self.current: + continue + if self.terminal_flags[index - self.agent_history_length:index].any(): + continue + break + self.indices[i] = index + + def get_minibatch(self): + """ + Returns a minibatch of self.batch_size transitions + """ + if self.count < self.agent_history_length: + raise ValueError('Not enough memories to get a minibatch') + + self._get_valid_indices() + + for i, idx in enumerate(self.indices): + self.states[i] = self._get_state(idx - 1) + self.new_states[i] = self._get_state(idx) + + return self.states, self.actions[self.indices], self.rewards[self.indices], self.new_states, self.terminal_flags[self.indices] def init_filter(shape): @@ -62,6 +148,33 @@ def init_filter(shape): return w.astype(np.float32) +def adam(cost, params, lr0=1e-5, beta1=0.9, beta2=0.999, eps=1e-8): + # cast + lr0 = np.float32(lr0) + beta1 = np.float32(beta1) + beta2 = np.float32(beta2) + eps = np.float32(eps) + one = np.float32(1) + zero = np.float32(0) + + grads = T.grad(cost, params) + updates = [] + time = theano.shared(zero) + new_time = time + one + updates.append((time, new_time)) + lr = lr0*T.sqrt(one - beta2**new_time) / (one - beta1**new_time) + for p, g in zip(params, grads): + m = theano.shared(p.get_value() * zero) + v = theano.shared(p.get_value() * zero) + new_m = beta1*m + (one - beta1)*g + new_v = beta2*v + (one - beta2)*g*g + new_p = p - lr*new_m / (T.sqrt(new_v) + eps) + updates.append((m, new_m)) + updates.append((v, new_v)) + updates.append((p, new_p)) + return updates + + class ConvLayer(object): def __init__(self, mi, mo, filtsz=5, stride=2, f=T.nnet.relu): # mi = input feature map size @@ -74,17 +187,21 @@ def __init__(self, mi, mo, filtsz=5, stride=2, f=T.nnet.relu): self.stride = (stride, stride) self.params = [self.W, self.b] self.f = f + # self.cut = cut def forward(self, X): conv_out = conv2d( input=X, filters=self.W, subsample=self.stride, - border_mode='half', + # border_mode='half', + border_mode='valid', ) # cut off 1 pixel from each edge # to make the output the same size as input # like tensorflow + # if self.cut: + # conv_out = conv_out[:, : ,:self.cut ,:self.cut] return self.f(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) class HiddenLayer: @@ -102,10 +219,6 @@ def forward(self, X): class DQN: def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): self.K = K - lr = np.float32(2.5e-4) - mu = np.float32(0) - decay = np.float32(0.99) - eps = np.float32(1e-10) # inputs and targets X = T.ftensor4('X') @@ -115,24 +228,18 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): # create the graph self.conv_layers = [] num_input_filters = 4 # number of filters / color channels + current_size = IM_SIZE for num_output_filters, filtersz, stride in conv_layer_sizes: + ### not using this currently, it didn't make 
a difference ### + # cut = None + # if filtersz % 2 == 0: # if even + # cut = (current_size + stride - 1) // stride layer = ConvLayer(num_input_filters, num_output_filters, filtersz, stride) + current_size = (current_size + stride - 1) // stride + # print("current_size:", current_size) self.conv_layers.append(layer) num_input_filters = num_output_filters - - ##### debug ##### - # Z = X / 255.0 - # j = 0 - # for layer in self.conv_layers: - # Z = layer.forward(Z) - # out = Z - # op = theano.function(inputs=[X], outputs=out, allow_input_downcast=True) - # test = op(np.random.randn(1, 4, IM_SIZE, IM_SIZE)) - # print("output size after conv %d: %s" % (j, test.shape)) - # j += 1 - - # get conv output size Z = X / 255.0 for layer in self.conv_layers: @@ -146,6 +253,7 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): # build fully connected layers self.layers = [] M1 = flattened_ouput_size + # print("flattened_ouput_size:", flattened_ouput_size) for M2 in hidden_layer_sizes: layer = HiddenLayer(M1, M2) self.layers.append(layer) @@ -171,18 +279,7 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): cost = T.mean((G - selected_action_values)**2) # create train function - # we need to ensure cache is updated before parameter update - # by creating a list of new_caches - # and using them in the parameter update - grads = T.grad(cost, self.params) - caches = [theano.shared(np.ones_like(p.get_value())) for p in self.params] - new_caches = [decay*c + (np.float32(1) - decay)*g*g for c, g in zip(caches, grads)] - - c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)] - g_update = [ - (p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(self.params, new_caches, grads) - ] - updates = c_update + g_update + updates = adam(cost, self.params) # compile functions self.train_op = theano.function( @@ -221,8 +318,7 @@ def sample_action(self, x, eps): def learn(model, target_model, experience_replay_buffer, gamma, batch_size): # Sample experiences - samples = random.sample(experience_replay_buffer, batch_size) - states, actions, rewards, next_states, dones = map(np.array, zip(*samples)) + states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch() # Calculate targets next_Qs = target_model.predict(next_states) @@ -252,7 +348,6 @@ def play_one( obs = env.reset() obs_small = downsample_image(obs) state = np.stack([obs_small] * 4, axis=0) - assert(state.shape == (4, 80, 80)) loss = None @@ -268,24 +363,16 @@ def play_one( target_model.copy_from(model) print("Copied model parameters to target network. 
total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD)) - # Take action action = model.sample_action(state, epsilon) obs, reward, done, _ = env.step(action) obs_small = downsample_image(obs) next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0) - # assert(state.shape == (4, 80, 80)) - - episode_reward += reward - # Remove oldest experience if replay buffer is full - if len(experience_replay_buffer) == MAX_EXPERIENCES: - experience_replay_buffer.pop(0) - # Save the latest experience - experience_replay_buffer.append((state, action, reward, next_state, done)) + experience_replay_buffer.add_experience(action, obs_small, reward, done) # Train the model, keep track of time t0_2 = datetime.now() @@ -304,6 +391,15 @@ def play_one( return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + if __name__ == '__main__': @@ -312,9 +408,9 @@ def play_one( hidden_layer_sizes = [512] gamma = 0.99 batch_sz = 32 - num_episodes = 10000 + num_episodes = 5000 total_t = 0 - experience_replay_buffer = [] + experience_replay_buffer = ReplayMemory() episode_rewards = np.zeros(num_episodes) step_counts = np.zeros(num_episodes) @@ -339,40 +435,30 @@ def play_one( conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, gamma=gamma, - # scope="model" ) target_model = DQN( K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, gamma=gamma, - # scope="target_model" ) print("Populating experience replay buffer...") obs = env.reset() obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - # assert(state.shape == (4, 80, 80)) for i in range(MIN_EXPERIENCES): - action = np.random.choice(K) - obs, reward, done, _ = env.step(action) - next_state = update_state(state, obs) - # assert(state.shape == (4, 80, 80)) - experience_replay_buffer.append((state, action, reward, next_state, done)) + action = np.random.choice(K) + obs, reward, done, _ = env.step(action) + experience_replay_buffer.add_experience(action, obs_small, reward, done) - if done: - obs = env.reset() - obs_small = downsample_image(obs) - state = np.stack([obs_small] * 4, axis=0) - # assert(state.shape == (4, 80, 80)) - else: - state = next_state + if done: + obs = env.reset() # Play a number of episodes and learn! 
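  # essentially the same training loop as the tensorflow version, but in this
  # file frames are preprocessed in numpy (rgb2gray + nearest-neighbor
  # imresize to 84x84) instead of inside the graph, and the updates come from
  # the hand-rolled adam() above, whose learning rate is bias-corrected as
  # lr0 * sqrt(1 - beta2^t) / (1 - beta1^t).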
+ t0 = datetime.now() for i in range(num_episodes): total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one( @@ -402,5 +488,13 @@ def play_one( "Epsilon:", "%.3f" % epsilon ) sys.stdout.flush() + print("Total duration:", datetime.now() - t0) + + # Plot the smoothed returns + y = smooth(episode_rewards) + plt.plot(episode_rewards, label='orig') + plt.plot(y, label='smoothed') + plt.legend() + plt.show() From f108f1caaeb3f425a0c52207655acab41f3b601c Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 8 Feb 2019 22:52:43 -0500 Subject: [PATCH 127/329] update --- rl2/atari/dqn_tf.py | 2 +- rl2/atari/dqn_theano.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rl2/atari/dqn_tf.py b/rl2/atari/dqn_tf.py index 3894fda9..71a6d9c2 100644 --- a/rl2/atari/dqn_tf.py +++ b/rl2/atari/dqn_tf.py @@ -60,7 +60,7 @@ def update_state(state, obs_small): class ReplayMemory: - def __init__(self, size=500000, frame_height=IM_SIZE, frame_width=IM_SIZE, + def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE, agent_history_length=4, batch_size=32): """ Args: diff --git a/rl2/atari/dqn_theano.py b/rl2/atari/dqn_theano.py index f04c1772..efdb8d8e 100644 --- a/rl2/atari/dqn_theano.py +++ b/rl2/atari/dqn_theano.py @@ -59,7 +59,7 @@ def update_state(state, obs): class ReplayMemory: - def __init__(self, size=500000, frame_height=IM_SIZE, frame_width=IM_SIZE, + def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE, agent_history_length=4, batch_size=32): """ Args: From d715846c167301273406be1474994fab40ec8289 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 10 Feb 2019 00:21:57 -0500 Subject: [PATCH 128/329] update --- rl2/atari/dqn_tf.py | 4 +--- rl2/atari/dqn_theano.py | 10 +++++----- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/rl2/atari/dqn_tf.py b/rl2/atari/dqn_tf.py index 71a6d9c2..34c1ab16 100644 --- a/rl2/atari/dqn_tf.py +++ b/rl2/atari/dqn_tf.py @@ -145,7 +145,7 @@ def get_minibatch(self): class DQN: - def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope): + def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, scope): self.K = K self.scope = scope @@ -374,13 +374,11 @@ def smooth(x): K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, - gamma=gamma, scope="model") target_model = DQN( K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, - gamma=gamma, scope="target_model" ) image_transformer = ImageTransformer() diff --git a/rl2/atari/dqn_theano.py b/rl2/atari/dqn_theano.py index efdb8d8e..e0114b59 100644 --- a/rl2/atari/dqn_theano.py +++ b/rl2/atari/dqn_theano.py @@ -217,7 +217,7 @@ def forward(self, X): return self.f(a) class DQN: - def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): + def __init__(self, K, conv_layer_sizes, hidden_layer_sizes): self.K = K # inputs and targets @@ -253,7 +253,7 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): # build fully connected layers self.layers = [] M1 = flattened_ouput_size - # print("flattened_ouput_size:", flattened_ouput_size) + print("flattened_ouput_size:", flattened_ouput_size) for M2 in hidden_layer_sizes: layer = HiddenLayer(M1, M2) self.layers.append(layer) @@ -284,6 +284,7 @@ def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma): # compile functions self.train_op = theano.function( inputs=[X, G, actions], + outputs=cost, updates=updates, allow_input_downcast=True ) @@ -305,7 +306,7 @@ def predict(self, X): return 
self.predict_op(X) def update(self, states, actions, targets): - self.train_op(states, targets, actions) + return self.train_op(states, targets, actions) def sample_action(self, x, eps): if np.random.random() < eps: @@ -434,13 +435,11 @@ def smooth(x): K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, - gamma=gamma, ) target_model = DQN( K=K, conv_layer_sizes=conv_layer_sizes, hidden_layer_sizes=hidden_layer_sizes, - gamma=gamma, ) @@ -451,6 +450,7 @@ def smooth(x): action = np.random.choice(K) obs, reward, done, _ = env.step(action) + obs_small = downsample_image(obs) experience_replay_buffer.add_experience(action, obs_small, reward, done) if done: From f415da5ffc9f5cac36440fb5fedceae8c8440ee1 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 16 Feb 2019 15:59:47 -0500 Subject: [PATCH 129/329] update --- ann_class2/theano2.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ann_class2/theano2.py b/ann_class2/theano2.py index 3bf29744..5dd86896 100644 --- a/ann_class2/theano2.py +++ b/ann_class2/theano2.py @@ -36,8 +36,10 @@ def main(): lr = 0.0004 reg = 0.01 - Xtrain = Xtest.astype(np.float32) - Ytrain = Ytest.astype(np.float32) + Xtrain = Xtrain.astype(np.float32) + Ytrain = Ytrain.astype(np.float32) + Xtest = Xtest.astype(np.float32) + Ytest = Ytest.astype(np.float32) Ytrain_ind = y2indicator(Ytrain).astype(np.float32) Ytest_ind = y2indicator(Ytest).astype(np.float32) From f0f1e8616314ca204f66a13adf315ebf63b7dd6a Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 20 Feb 2019 21:01:04 -0500 Subject: [PATCH 130/329] update --- cnn_class/edge_benchmark.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cnn_class/edge_benchmark.py b/cnn_class/edge_benchmark.py index de605634..3394289e 100644 --- a/cnn_class/edge_benchmark.py +++ b/cnn_class/edge_benchmark.py @@ -82,7 +82,7 @@ def main(): # define variables and expressions X = tf.placeholder(tf.float32, shape=(None, D), name='X') - T = tf.placeholder(tf.float32, shape=(None, K), name='T') + T = tf.placeholder(tf.int32, shape=(None,), name='T') W1 = tf.Variable(W1_init.astype(np.float32)) b1 = tf.Variable(b1_init.astype(np.float32)) W2 = tf.Variable(W2_init.astype(np.float32)) From 9e35e19a04b7048a7174da576c51b2b5b568aecc Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 20 Feb 2019 21:27:59 -0500 Subject: [PATCH 131/329] update --- cnn_class/edge_benchmark.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cnn_class/edge_benchmark.py b/cnn_class/edge_benchmark.py index 3394289e..5e68d774 100644 --- a/cnn_class/edge_benchmark.py +++ b/cnn_class/edge_benchmark.py @@ -63,7 +63,7 @@ def main(): Ytest = test['y'].flatten() - 1 # gradient descent params - max_iter = 6 + max_iter = 15 print_period = 10 N, D = Xtrain.shape batch_sz = 500 From df32fc53f26981895395694d9194219d875f3499 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 20 Feb 2019 22:25:19 -0500 Subject: [PATCH 132/329] update --- cnn_class/keras_example.py | 121 +++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 cnn_class/keras_example.py diff --git a/cnn_class/keras_example.py b/cnn_class/keras_example.py new file mode 100644 index 00000000..4667d84f --- /dev/null +++ b/cnn_class/keras_example.py @@ -0,0 +1,121 @@ +# https://deeplearningcourses.com/c/advanced-computer-vision +# https://www.udemy.com/advanced-computer-vision + +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install 
-U future + +from keras.models import Sequential, Model +from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Input + +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np + +from datetime import datetime +from scipy.io import loadmat +from sklearn.utils import shuffle + +from benchmark import get_data, error_rate + + +# helper +# def y2indicator(Y): +# N = len(Y) +# K = len(set(Y)) +# I = np.zeros((N, K)) +# I[np.arange(N), Y] = 1 +# return I + +def rearrange(X): + # input is (32, 32, 3, N) + # output is (N, 32, 32, 3) + # N = X.shape[-1] + # out = np.zeros((N, 32, 32, 3), dtype=np.float32) + # for i in xrange(N): + # for j in xrange(3): + # out[i, :, :, j] = X[:, :, j, i] + # return out / 255 + return (X.transpose(3, 0, 1, 2) / 255.).astype(np.float32) + + +# get the data +train, test = get_data() + +# Need to scale! don't leave as 0..255 +# Y is a N x 1 matrix with values 1..10 (MATLAB indexes by 1) +# So flatten it and make it 0..9 +# Also need indicator matrix for cost calculation +Xtrain = rearrange(train['X']) +Ytrain = train['y'].flatten() - 1 +del train + +Xtest = rearrange(test['X']) +Ytest = test['y'].flatten() - 1 +del test + + + +# get shapes +K = len(set(Ytrain)) + + + +# make the CNN +i = Input(shape=Xtrain.shape[1:]) +x = Conv2D(filters=20, kernel_size=(5, 5))(i) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=50, kernel_size=(5, 5))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dense(units=500)(x) +x = Activation('relu')(x) +x = Dropout(0.3)(x) +x = Dense(units=K)(x) +x = Activation('softmax')(x) + +model = Model(inputs=i, outputs=x) + + +# list of losses: https://keras.io/losses/ +# list of optimizers: https://keras.io/optimizers/ +# list of metrics: https://keras.io/metrics/ +model.compile( + loss='sparse_categorical_crossentropy', + optimizer='adam', + metrics=['accuracy'] +) + +# note: multiple ways to choose a backend +# either theano, tensorflow, or cntk +# https://keras.io/backend/ + + +# gives us back a +r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=10, batch_size=32) +print("Returned:", r) + +# print the available keys +# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc']) +print(r.history.keys()) + +# plot some data +plt.plot(r.history['loss'], label='loss') +plt.plot(r.history['val_loss'], label='val_loss') +plt.legend() +plt.show() + +# accuracies +plt.plot(r.history['acc'], label='acc') +plt.plot(r.history['val_acc'], label='val_acc') +plt.legend() +plt.show() + + From 06012773476612e9b75da01ca4896f6671b878c7 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 24 Feb 2019 14:24:53 -0500 Subject: [PATCH 133/329] add numpy exercises --- numpy_class/exercises/ex1.py | 29 +++++++++++++++++ numpy_class/exercises/ex2.py | 29 +++++++++++++++++ numpy_class/exercises/ex3.py | 34 ++++++++++++++++++++ numpy_class/exercises/ex4.py | 54 ++++++++++++++++++++++++++++++++ numpy_class/exercises/ex5.py | 60 ++++++++++++++++++++++++++++++++++++ numpy_class/exercises/ex6.py | 25 +++++++++++++++ numpy_class/exercises/ex7.py | 34 ++++++++++++++++++++ numpy_class/exercises/ex8.py | 50 ++++++++++++++++++++++++++++++ numpy_class/exercises/ex9.py | 28 +++++++++++++++++ 9 files changed, 343 insertions(+) create mode 100644 numpy_class/exercises/ex1.py create mode 100644 numpy_class/exercises/ex2.py create mode 100644 numpy_class/exercises/ex3.py create mode 100644 
numpy_class/exercises/ex4.py create mode 100644 numpy_class/exercises/ex5.py create mode 100644 numpy_class/exercises/ex6.py create mode 100644 numpy_class/exercises/ex7.py create mode 100644 numpy_class/exercises/ex8.py create mode 100644 numpy_class/exercises/ex9.py diff --git a/numpy_class/exercises/ex1.py b/numpy_class/exercises/ex1.py new file mode 100644 index 00000000..c81d2c79 --- /dev/null +++ b/numpy_class/exercises/ex1.py @@ -0,0 +1,29 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +A = np.array([ + [0.3, 0.6, 0.1], + [0.5, 0.2, 0.3], + [0.4, 0.1, 0.5]]) + +v = np.ones(3) / 3 + +num_iters = 25 +distances = np.zeros(num_iters) +for i in range(num_iters): + v2 = v.dot(A) + d = np.linalg.norm(v2 - v) + distances[i] = d + v = v2 + +plt.plot(distances) +plt.show() \ No newline at end of file diff --git a/numpy_class/exercises/ex2.py b/numpy_class/exercises/ex2.py new file mode 100644 index 00000000..cea7b7e2 --- /dev/null +++ b/numpy_class/exercises/ex2.py @@ -0,0 +1,29 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +def sampleY(n=1000): + # draw n samples from uniform dist. 
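    # (central limit theorem in action: Y is the sum of n i.i.d. U(0,1) draws,
    # so it is approximately normal with mean n/2 and variance n/12;
    # for n=1000 that is mean 500 and std of about 9.1, which is the bell
    # shape the histogram below should show)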
+ X = np.random.random(n) + Y = X.sum() + return Y + + +# now draw N Y's +N = 1000 +Y_samples = np.zeros(N) +for i in range(N): + Y_samples[i] = sampleY() + + +# now plot the Y_samples +plt.hist(Y_samples, bins=20) +plt.show() \ No newline at end of file diff --git a/numpy_class/exercises/ex3.py b/numpy_class/exercises/ex3.py new file mode 100644 index 00000000..eb46f424 --- /dev/null +++ b/numpy_class/exercises/ex3.py @@ -0,0 +1,34 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + + +# load in the data +df = pd.read_csv('../../large_files/train.csv') +data = df.values +X = data[:, 1:] # images +Y = data[:, 0] # labels + +# loop through each label +for k in range(10): + Xk = X[Y == k] + + # mean image + Mk = Xk.mean(axis=0) + + # reshape into an image + im = Mk.reshape(28, 28) + + # plot the image + plt.imshow(im, cmap='gray') + plt.title("Label: %s" % k) + plt.show() diff --git a/numpy_class/exercises/ex4.py b/numpy_class/exercises/ex4.py new file mode 100644 index 00000000..bdd0a90e --- /dev/null +++ b/numpy_class/exercises/ex4.py @@ -0,0 +1,54 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + + +# load in the data +df = pd.read_csv('../../large_files/train.csv') +data = df.values + +# shuffle the images +np.random.shuffle(data) + +X = data[:, 1:] # images +Y = data[:, 0] # labels + + +# define rotate functions +def rotate1(im): + return np.rot90(im, 3) + +def rotate2(im): + H, W = im.shape + im2 = np.zeros((W, H)) + for i in range(H): + for j in range(W): + im2[j,H - i - 1] = im[i,j] + return im2 + + +for i in range(X.shape[0]): + # get the image + im = X[i].reshape(28, 28) + + # flip the image + # im = rotate1(im) + im = rotate2(im) + + # plot the image + plt.imshow(im, cmap='gray') + plt.title("Label: %s" % Y[i]) + plt.show() + + ans = input("Continue? 
[Y/n]: ") + if ans and ans[0].lower() == 'n': + break diff --git a/numpy_class/exercises/ex5.py b/numpy_class/exercises/ex5.py new file mode 100644 index 00000000..8482d070 --- /dev/null +++ b/numpy_class/exercises/ex5.py @@ -0,0 +1,60 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + + +def is_symmetric1(A): + return np.all(A == A.T) + + +def is_symmetric2(A): + rows, cols = A.shape + if rows != cols: + return False + + for i in range(rows): + for j in range(cols): + if A[i,j] != A[j,i]: + return False + + return True + + +def check(A, b): + print("Testing:", A) + assert(is_symmetric1(A) == b) + assert(is_symmetric2(A) == b) + + +# test the functions +A = np.zeros((3, 3)) +check(A, True) + +A = np.eye(3) +check(A, True) + +A = np.random.randn(3, 2) +A = A.dot(A.T) +check(A, True) + +A = np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]]) +check(A, True) + +A = np.random.randn(3, 2) +check(A, False) + +A = np.random.randn(3, 3) +check(A, False) + +A = np.arange(9).reshape(3, 3) +check(A, False) + diff --git a/numpy_class/exercises/ex6.py b/numpy_class/exercises/ex6.py new file mode 100644 index 00000000..b1c88288 --- /dev/null +++ b/numpy_class/exercises/ex6.py @@ -0,0 +1,25 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +# generate unlabeled data +N = 2000 +X = np.random.random((N, 2))*2 - 1 + +# generate labels +Y = np.zeros(N) +Y[(X[:,0] < 0) & (X[:,1] > 0)] = 1 +Y[(X[:,0] > 0) & (X[:,1] < 0)] = 1 + +# plot it +plt.scatter(X[:,0], X[:,1], c=Y) +plt.show() \ No newline at end of file diff --git a/numpy_class/exercises/ex7.py b/numpy_class/exercises/ex7.py new file mode 100644 index 00000000..46f5e0fb --- /dev/null +++ b/numpy_class/exercises/ex7.py @@ -0,0 +1,34 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +def get_donut(): + N = 2000 + R_inner = 5 + R_outer = 10 + + # distance from origin is radius + random normal + # angle theta is uniformly distributed between (0, 2pi) + R1 = np.random.randn(N//2) + R_inner + theta = 2*np.pi*np.random.random(N//2) + X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T + + R2 = np.random.randn(N//2) + R_outer + theta = 2*np.pi*np.random.random(N//2) + X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T + + X = np.concatenate([ X_inner, X_outer ]) + Y = np.array([0]*(N//2) + [1]*(N//2)) + return X, Y + +X, Y = get_donut() +plt.scatter(X[:,0], X[:,1], c=Y) +plt.show() \ 
No newline at end of file diff --git a/numpy_class/exercises/ex8.py b/numpy_class/exercises/ex8.py new file mode 100644 index 00000000..8d8926ba --- /dev/null +++ b/numpy_class/exercises/ex8.py @@ -0,0 +1,50 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + +def get_spiral(): + # Idea: radius -> low...high + # (don't start at 0, otherwise points will be "mushed" at origin) + # angle = low...high proportional to radius + # [0, 2pi/6, 4pi/6, ..., 10pi/6] --> [pi/2, pi/3 + pi/2, ..., ] + # x = rcos(theta), y = rsin(theta) as usual + + radius = np.linspace(1, 10, 100) + thetas = np.empty((6, 100)) + for i in range(6): + start_angle = np.pi*i / 3.0 + end_angle = start_angle + np.pi / 2 + points = np.linspace(start_angle, end_angle, 100) + thetas[i] = points + + # convert into cartesian coordinates + x1 = np.empty((6, 100)) + x2 = np.empty((6, 100)) + for i in range(6): + x1[i] = radius * np.cos(thetas[i]) + x2[i] = radius * np.sin(thetas[i]) + + # inputs + X = np.empty((600, 2)) + X[:,0] = x1.flatten() + X[:,1] = x2.flatten() + + # add noise + X += np.random.randn(600, 2)*0.5 + + # targets + Y = np.array([0]*100 + [1]*100 + [0]*100 + [1]*100 + [0]*100 + [1]*100) + return X, Y + + +X, Y = get_spiral() +plt.scatter(X[:,0], X[:,1], c=Y) +plt.show() \ No newline at end of file diff --git a/numpy_class/exercises/ex9.py b/numpy_class/exercises/ex9.py new file mode 100644 index 00000000..c77cd812 --- /dev/null +++ b/numpy_class/exercises/ex9.py @@ -0,0 +1,28 @@ +# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python +# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python + +from __future__ import print_function, division +from future.utils import iteritems +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + + +from ex8 import get_spiral + +# get the data +X, Y = get_spiral() + +# combine the data into one array +# data to be concatenated must have same # of dimensions +# e.g. 
N x D and N x 1 +# not N x D and N +data = np.concatenate((X, np.expand_dims(Y, 1)), axis=1) + +df = pd.DataFrame(data) +df.columns = ['x1', 'x2', 'y'] +df.to_csv('mydata.csv', index=False) \ No newline at end of file From 34d52937731160460de42030dd4d05c978dda88f Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 27 Feb 2019 15:24:01 -0500 Subject: [PATCH 134/329] update --- unsupervised_class/gmm.py | 5 ++-- unsupervised_class/kmeans.py | 46 +++++++++++++++++++++++-------- unsupervised_class/kmeans_fail.py | 6 ++-- 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/unsupervised_class/gmm.py b/unsupervised_class/gmm.py index e2663567..77f95c24 100644 --- a/unsupervised_class/gmm.py +++ b/unsupervised_class/gmm.py @@ -25,7 +25,7 @@ def gmm(X, K, max_iter=20, smoothing=1e-2): M[k] = X[np.random.choice(N)] C[k] = np.eye(D) - costs = np.zeros(max_iter) + costs = [] weighted_pdfs = np.zeros((N, K)) # we'll use these to store the PDF value of sample n and Gaussian k for i in range(max_iter): # step 1: determine assignments / resposibilities @@ -57,7 +57,8 @@ def gmm(X, K, max_iter=20, smoothing=1e-2): # C[k] = np.sum(R[n,k]*np.outer(X[n] - M[k], X[n] - M[k]) for n in range(N)) / Nk + np.eye(D)*smoothing - costs[i] = np.log(weighted_pdfs.sum(axis=1)).sum() + c = np.log(weighted_pdfs.sum(axis=1)).sum() + costs.append(c) if i > 0: if np.abs(costs[i] - costs[i-1]) < 0.1: break diff --git a/unsupervised_class/kmeans.py b/unsupervised_class/kmeans.py index 16d75d31..b243b426 100644 --- a/unsupervised_class/kmeans.py +++ b/unsupervised_class/kmeans.py @@ -9,6 +9,7 @@ import numpy as np import matplotlib.pyplot as plt +from sklearn.metrics.pairwise import pairwise_distances def d(u, v): @@ -30,7 +31,7 @@ def cost(X, R, M): return cost -def plot_k_means(X, K, max_iter=20, beta=1.0, show_plots=True): +def plot_k_means(X, K, max_iter=20, beta=3.0, show_plots=False): N, D = X.shape M = np.zeros((K, D)) # R = np.zeros((N, K)) @@ -40,27 +41,41 @@ def plot_k_means(X, K, max_iter=20, beta=1.0, show_plots=True): for k in range(K): M[k] = X[np.random.choice(N)] - costs = np.zeros(max_iter) + costs = [] + k = 0 for i in range(max_iter): + k += 1 # step 1: determine assignments / resposibilities # is this inefficient? 
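            # note: these are the soft k-means responsibilities --
            # R[n,k] is proportional to exp(-beta * d(M[k], X[n])),
            # and each row is normalized to sum to 1 right after this loop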
for k in range(K): for n in range(N): - # R[n,k] = np.exp(-beta*d(M[k], X[n])) / np.sum( np.exp(-beta*d(M[j], X[n])) for j in range(K) ) exponents[n,k] = np.exp(-beta*d(M[k], X[n])) - R = exponents / exponents.sum(axis=1, keepdims=True) - # assert(np.abs(R - R2).sum() < 1e-10) + # step 2: recalculate means - for k in range(K): - M[k] = R[:,k].dot(X) / R[:,k].sum() + # decent vectorization + # for k in range(K): + # M[k] = R[:,k].dot(X) / R[:,k].sum() + # oldM = M - costs[i] = cost(X, R, M) + # full vectorization + M = R.T.dot(X) / R.sum(axis=0, keepdims=True).T + # print("diff M:", np.abs(M - oldM).sum()) + + c = cost(X, R, M) + costs.append(c) if i > 0: - if np.abs(costs[i] - costs[i-1]) < 1e-5: + if np.abs(costs[-1] - costs[-2]) < 1e-5: break + if len(costs) > 1: + if costs[-1] > costs[-2]: + pass + # print("cost increased!") + # print("M:", M) + # print("R.min:", R.min(), "R.max:", R.max()) + if show_plots: plt.plot(costs) plt.title("Costs") @@ -71,6 +86,7 @@ def plot_k_means(X, K, max_iter=20, beta=1.0, show_plots=True): plt.scatter(X[:,0], X[:,1], c=colors) plt.show() + print("Final cost", costs[-1]) return M, R @@ -98,13 +114,19 @@ def main(): plt.show() K = 3 # luckily, we already know this - plot_k_means(X, K) + plot_k_means(X, K, beta=1.0, show_plots=True) + + K = 3 # luckily, we already know this + plot_k_means(X, K, beta=3.0, show_plots=True) + + K = 3 # luckily, we already know this + plot_k_means(X, K, beta=10.0, show_plots=True) K = 5 # what happens if we choose a "bad" K? - plot_k_means(X, K, max_iter=30) + plot_k_means(X, K, max_iter=30, show_plots=True) K = 5 # what happens if we change beta? - plot_k_means(X, K, max_iter=30, beta=0.3) + plot_k_means(X, K, max_iter=30, beta=0.3, show_plots=True) if __name__ == '__main__': diff --git a/unsupervised_class/kmeans_fail.py b/unsupervised_class/kmeans_fail.py index b1a311e0..9b090c28 100644 --- a/unsupervised_class/kmeans_fail.py +++ b/unsupervised_class/kmeans_fail.py @@ -35,19 +35,19 @@ def donut(): def main(): # donut X = donut() - plot_k_means(X, 2) + plot_k_means(X, 2, beta=0.1, show_plots=True) # elongated clusters X = np.zeros((1000, 2)) X[:500,:] = np.random.multivariate_normal([0, 0], [[1, 0], [0, 20]], 500) X[500:,:] = np.random.multivariate_normal([5, 0], [[1, 0], [0, 20]], 500) - plot_k_means(X, 2) + plot_k_means(X, 2, beta=0.1, show_plots=True) # different density X = np.zeros((1000, 2)) X[:950,:] = np.array([0,0]) + np.random.randn(950, 2) X[950:,:] = np.array([3,0]) + np.random.randn(50, 2) - plot_k_means(X, 2) + plot_k_means(X, 2, show_plots=True) From ec3d034ff2e4180194e08d21c941cc5304a2bcef Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 27 Feb 2019 16:13:35 -0500 Subject: [PATCH 135/329] update --- unsupervised_class/gmm.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsupervised_class/gmm.py b/unsupervised_class/gmm.py index 77f95c24..4bd94c79 100644 --- a/unsupervised_class/gmm.py +++ b/unsupervised_class/gmm.py @@ -25,7 +25,7 @@ def gmm(X, K, max_iter=20, smoothing=1e-2): M[k] = X[np.random.choice(N)] C[k] = np.eye(D) - costs = [] + lls = [] weighted_pdfs = np.zeros((N, K)) # we'll use these to store the PDF value of sample n and Gaussian k for i in range(max_iter): # step 1: determine assignments / resposibilities @@ -57,14 +57,14 @@ def gmm(X, K, max_iter=20, smoothing=1e-2): # C[k] = np.sum(R[n,k]*np.outer(X[n] - M[k], X[n] - M[k]) for n in range(N)) / Nk + np.eye(D)*smoothing - c = np.log(weighted_pdfs.sum(axis=1)).sum() - costs.append(c) + ll = 
np.log(weighted_pdfs.sum(axis=1)).sum() + lls.append(ll) if i > 0: - if np.abs(costs[i] - costs[i-1]) < 0.1: + if np.abs(lls[i] - lls[i-1]) < 0.1: break - plt.plot(costs) - plt.title("Costs") + plt.plot(lls) + plt.title("Log-Likelihood") plt.show() random_colors = np.random.random((K, 3)) From 5329594c84c05ddb6c21b281b6994182b6dbf847 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 10 Mar 2019 01:48:33 -0500 Subject: [PATCH 136/329] update --- nlp_class2/tfidf_tsne.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nlp_class2/tfidf_tsne.py b/nlp_class2/tfidf_tsne.py index bceb652c..55bd4ce5 100644 --- a/nlp_class2/tfidf_tsne.py +++ b/nlp_class2/tfidf_tsne.py @@ -45,7 +45,7 @@ def main(): for w in word_list: if w not in word2idx: print("%s not found in vocab, remove it from \ - analogies to try or increase vocab size") + analogies to try or increase vocab size" % w) notfound = True if notfound: exit() @@ -57,6 +57,7 @@ def main(): # create raw counts first A = np.zeros((V, N)) + print("V:", V, "N:", N) j = 0 for sentence in sentences: for i in sentence: From d2d183a533d607347cb7f687e5611418adc0fcc7 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 3 Apr 2019 10:32:52 -0400 Subject: [PATCH 137/329] update --- supervised_class2/bias_variance_demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervised_class2/bias_variance_demo.py b/supervised_class2/bias_variance_demo.py index 609c7fed..4f4d2859 100644 --- a/supervised_class2/bias_variance_demo.py +++ b/supervised_class2/bias_variance_demo.py @@ -114,7 +114,7 @@ def f(X): for d in range(MAX_POLY): for i in range(Ntrain): delta = train_predictions[i,:,d] - avg_train_prediction[i,d] - variances[i,d] = delta.dot(delta) / N + variances[i,d] = delta.dot(delta) / len(delta) variance = variances.mean(axis=0) # make bias-variance plots From 082c10f2c78e2b6ba0ca4f33296103645523c4f6 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 16 Apr 2019 23:25:45 -0400 Subject: [PATCH 138/329] update --- rl/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index 9a2fbc48..81f52a48 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -1,3 +1,6 @@ +Hacking Google reCAPTCHA v3 using Reinforcement Learning +https://arxiv.org/pdf/1903.01003.pdf + Reinforcement Learning: A Tutorial Survey and Recent Advances - Abhijit Gosavi http://web.mst.edu/~gosavia/joc.pdf From fc3d6aa4eb84776a9fe0687fb11318471138e0df Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 17 Apr 2019 17:14:09 -0400 Subject: [PATCH 139/329] update --- rl2/extra_reading.txt | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/rl2/extra_reading.txt b/rl2/extra_reading.txt index b43b6d4e..0f19f02c 100644 --- a/rl2/extra_reading.txt +++ b/rl2/extra_reading.txt @@ -1,3 +1,12 @@ +Random Features for Large-Scale Kernel Machines +http://www.robots.ox.ac.uk/~vgg/rg/papers/randomfeatures.pdf + +Reflections on Random Kitchen Sinks +http://www.argmin.net/2017/12/05/kitchen-sinks/ + +Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning +https://papers.nips.cc/paper/3495-weighted-sums-of-random-kitchen-sinks-replacing-minimization-with-randomization-in-learning + Sutton & Barto http://incompleteideas.net/sutton/book/the-book-2nd.html @@ -11,4 +20,4 @@ Playing Atari with Deep Reinforcement Learning https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf Asynchronous Methods for Deep Reinforcement Learning -https://arxiv.org/pdf/1602.01783.pdf 
+https://arxiv.org/pdf/1602.01783.pdf \ No newline at end of file From 05feb3970e1a5796aee994af5750ff3c17be2f0f Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 22 Apr 2019 04:22:07 -0400 Subject: [PATCH 140/329] update --- rl2/extra_reading.txt | 3 +++ svm_class/extra_reading.txt | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/rl2/extra_reading.txt b/rl2/extra_reading.txt index 0f19f02c..a4c20829 100644 --- a/rl2/extra_reading.txt +++ b/rl2/extra_reading.txt @@ -7,6 +7,9 @@ http://www.argmin.net/2017/12/05/kitchen-sinks/ Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning https://papers.nips.cc/paper/3495-weighted-sums-of-random-kitchen-sinks-replacing-minimization-with-randomization-in-learning +This guy generated some nice plots and code to demonstrate that RBFSampler works like a real RBF Kernel +https://www.kaggle.com/sy2002/rbfsampler-actually-is-not-using-any-rbfs + Sutton & Barto http://incompleteideas.net/sutton/book/the-book-2nd.html diff --git a/svm_class/extra_reading.txt b/svm_class/extra_reading.txt index e9f46c12..6f5c8ab9 100644 --- a/svm_class/extra_reading.txt +++ b/svm_class/extra_reading.txt @@ -59,4 +59,7 @@ Using the Nyström Method to Speed Up Kernel Machines https://papers.nips.cc/paper/1866-using-the-nystrom-method-to-speed-up-kernel-machines Nyström Method vs Random Fourier Features: A Theoretical and Empirical Comparison -https://papers.nips.cc/paper/4588-nystrom-method-vs-random-fourier-features-a-theoretical-and-empirical-comparison \ No newline at end of file +https://papers.nips.cc/paper/4588-nystrom-method-vs-random-fourier-features-a-theoretical-and-empirical-comparison + +This guy generated some nice plots and code to demonstrate that RBFSampler works like a real RBF Kernel +https://www.kaggle.com/sy2002/rbfsampler-actually-is-not-using-any-rbfs \ No newline at end of file From 30e757eb3786f996397258ddea382aeca07f7835 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 8 May 2019 01:07:48 -0400 Subject: [PATCH 141/329] update --- rl3/a2c/a2c.py | 214 ++++++++++++++++++++++ rl3/a2c/atari_wrappers.py | 289 ++++++++++++++++++++++++++++++ rl3/a2c/main.py | 55 ++++++ rl3/a2c/neural_network.py | 55 ++++++ rl3/a2c/play.py | 56 ++++++ rl3/a2c/subproc_vec_env.py | 106 +++++++++++ rl3/ddpg.py | 323 ++++++++++++++++++++++++++++++++++ rl3/es_flappy.py | 252 ++++++++++++++++++++++++++ rl3/es_mnist.py | 159 +++++++++++++++++ rl3/es_mujoco.py | 204 +++++++++++++++++++++ rl3/es_simple.py | 59 +++++++ rl3/extra_reading.txt | 22 +++ rl3/flappy2envs.py | 184 +++++++++++++++++++ rl3/gym_review.py | 60 +++++++ rl3/plot_ddpg_result.py | 38 ++++ rl3/plot_es_flappy_results.py | 21 +++ rl3/plot_es_mujoco_results.py | 21 +++ rl3/sample_test.py | 16 ++ 18 files changed, 2134 insertions(+) create mode 100644 rl3/a2c/a2c.py create mode 100644 rl3/a2c/atari_wrappers.py create mode 100644 rl3/a2c/main.py create mode 100644 rl3/a2c/neural_network.py create mode 100644 rl3/a2c/play.py create mode 100644 rl3/a2c/subproc_vec_env.py create mode 100644 rl3/ddpg.py create mode 100644 rl3/es_flappy.py create mode 100644 rl3/es_mnist.py create mode 100644 rl3/es_mujoco.py create mode 100644 rl3/es_simple.py create mode 100644 rl3/extra_reading.txt create mode 100644 rl3/flappy2envs.py create mode 100644 rl3/gym_review.py create mode 100644 rl3/plot_ddpg_result.py create mode 100644 rl3/plot_es_flappy_results.py create mode 100644 rl3/plot_es_mujoco_results.py create mode 100644 rl3/sample_test.py diff --git a/rl3/a2c/a2c.py b/rl3/a2c/a2c.py 
new file mode 100644 index 00000000..3b7d3268 --- /dev/null +++ b/rl3/a2c/a2c.py @@ -0,0 +1,214 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import time +import joblib +import numpy as np +import tensorflow as tf +import os + + +def set_global_seeds(i): + tf.set_random_seed(i) + np.random.seed(i) + + +def cat_entropy(logits): + a0 = logits - tf.reduce_max(logits, 1, keepdims=True) + ea0 = tf.exp(a0) + z0 = tf.reduce_sum(ea0, 1, keepdims=True) + p0 = ea0 / z0 + return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1) + + +def find_trainable_variables(key): + with tf.variable_scope(key): + return tf.trainable_variables() + + +def discount_with_dones(rewards, dones, gamma): + discounted = [] + r = 0 + for reward, done in zip(rewards[::-1], dones[::-1]): + r = reward + gamma * r * (1. - done) # fixed off by one bug + discounted.append(r) + return discounted[::-1] + + + +class Agent: + def __init__(self, Network, ob_space, ac_space, nenvs, nsteps, nstack, + ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, + alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6)): + config = tf.ConfigProto(intra_op_parallelism_threads=nenvs, + inter_op_parallelism_threads=nenvs) + config.gpu_options.allow_growth = True + sess = tf.Session(config=config) + nbatch = nenvs * nsteps + + A = tf.placeholder(tf.int32, [nbatch]) + ADV = tf.placeholder(tf.float32, [nbatch]) + R = tf.placeholder(tf.float32, [nbatch]) + LR = tf.placeholder(tf.float32, []) + + step_model = Network(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) + train_model = Network(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) + + neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=A) + pg_loss = tf.reduce_mean(ADV * neglogpac) + vf_loss = tf.reduce_mean(tf.squared_difference(tf.squeeze(train_model.vf), R) / 2.0) + entropy = tf.reduce_mean(cat_entropy(train_model.pi)) + loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + + params = find_trainable_variables("model") + grads = tf.gradients(loss, params) + if max_grad_norm is not None: + grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm) + grads_and_params = list(zip(grads, params)) + trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon) + _train = trainer.apply_gradients(grads_and_params) + + def train(states, rewards, actions, values): + advs = rewards - values + feed_dict = {train_model.X: states, A: actions, ADV: advs, R: rewards, LR: lr} + policy_loss, value_loss, policy_entropy, _ = sess.run( + [pg_loss, vf_loss, entropy, _train], + feed_dict + ) + return policy_loss, value_loss, policy_entropy + + def save(save_path): + ps = sess.run(params) + joblib.dump(ps, save_path) + + def load(load_path): + loaded_params = joblib.load(load_path) + restores = [] + for p, loaded_p in zip(params, loaded_params): + restores.append(p.assign(loaded_p)) + ps = sess.run(restores) + + self.train = train + self.train_model = train_model + self.step_model = step_model + self.step = step_model.step + self.value = step_model.value + self.save = save + self.load = load + tf.global_variables_initializer().run(session=sess) + + +class Runner: + def __init__(self, env, agent, nsteps=5, nstack=4, gamma=0.99): + self.env = env + self.agent = agent + nh, nw, nc = env.observation_space.shape + nenv = env.num_envs + self.batch_ob_shape = (nenv * nsteps, nh, nw, nc * nstack) + self.state = np.zeros((nenv, nh, nw, nc * nstack), dtype=np.uint8) + self.nc = nc + obs = env.reset() + 
self.update_state(obs) + self.gamma = gamma + self.nsteps = nsteps + self.dones = [False for _ in range(nenv)] + self.total_rewards = [] # store all workers' total rewards + self.real_total_rewards = [] + + def update_state(self, obs): + # Do frame-stacking here instead of the FrameStack wrapper to reduce IPC overhead + self.state = np.roll(self.state, shift=-self.nc, axis=3) + self.state[:, :, :, -self.nc:] = obs + + def run(self): + mb_states, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], [] + for n in range(self.nsteps): + actions, values = self.agent.step(self.state) + mb_states.append(np.copy(self.state)) + mb_actions.append(actions) + mb_values.append(values) + mb_dones.append(self.dones) + obs, rewards, dones, infos = self.env.step(actions) + for done, info in zip(dones, infos): + if done: + self.total_rewards.append(info['reward']) + if info['total_reward'] != -1: + self.real_total_rewards.append(info['total_reward']) + self.dones = dones + for n, done in enumerate(dones): + if done: + self.state[n] = self.state[n] * 0 + self.update_state(obs) + mb_rewards.append(rewards) + mb_dones.append(self.dones) + # batch of steps to batch of rollouts + mb_states = np.asarray(mb_states, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape) + mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) + mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) + mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) + mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) + mb_dones = mb_dones[:, 1:] + last_values = self.agent.value(self.state).tolist() + # discount/bootstrap off value fn + for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): + rewards = rewards.tolist() + dones = dones.tolist() + if dones[-1] == 0: + rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1] + else: + rewards = discount_with_dones(rewards, dones, self.gamma) + mb_rewards[n] = rewards + mb_rewards = mb_rewards.flatten() + mb_actions = mb_actions.flatten() + mb_values = mb_values.flatten() + return mb_states, mb_rewards, mb_actions, mb_values + + +def learn(network, env, seed, new_session=True, nsteps=5, nstack=4, total_timesteps=int(80e6), + vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, + epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=1000): + tf.reset_default_graph() + set_global_seeds(seed) + + nenvs = env.num_envs + env_id = env.env_id + save_name = os.path.join('models', env_id + '.save') + ob_space = env.observation_space + ac_space = env.action_space + agent = Agent(Network=network, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, + nsteps=nsteps, nstack=nstack, + ent_coef=ent_coef, vf_coef=vf_coef, + max_grad_norm=max_grad_norm, + lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps) + if os.path.exists(save_name): + agent.load(save_name) + + runner = Runner(env, agent, nsteps=nsteps, nstack=nstack, gamma=gamma) + + nbatch = nenvs * nsteps + tstart = time.time() + for update in range(1, total_timesteps // nbatch + 1): + states, rewards, actions, values = runner.run() + policy_loss, value_loss, policy_entropy = agent.train( + states, rewards, actions, values) + nseconds = time.time() - tstart + fps = int((update * nbatch) / nseconds) + if update % log_interval == 0 or update == 1: + print(' - - - - - - - ') + print("nupdates", update) + print("total_timesteps", update * nbatch) + print("fps", fps) + print("policy_entropy", float(policy_entropy)) + 
print("value_loss", float(value_loss)) + + # total reward + r = runner.total_rewards[-100:] # get last 100 + tr = runner.real_total_rewards[-100:] + if len(r) == 100: + print("avg reward (last 100):", np.mean(r)) + if len(tr) == 100: + print("avg total reward (last 100):", np.mean(tr)) + print("max (last 100):", np.max(tr)) + + agent.save(save_name) + + env.close() + agent.save(save_name) diff --git a/rl3/a2c/atari_wrappers.py b/rl3/a2c/atari_wrappers.py new file mode 100644 index 00000000..d0b6531a --- /dev/null +++ b/rl3/a2c/atari_wrappers.py @@ -0,0 +1,289 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +from collections import deque +import gym +from gym import spaces +import cv2 # opencv-python + + +class NoopResetEnv(gym.Wrapper): + def __init__(self, env, noop_max=30): + """Sample initial states by taking random number of no-ops on reset. + No-op is assumed to be action 0. + """ + gym.Wrapper.__init__(self, env) + self.noop_max = noop_max + self.override_num_noops = None + self.noop_action = 0 + assert env.unwrapped.get_action_meanings()[0] == 'NOOP' + + def reset(self, **kwargs): + """ Do no-op action for a number of steps in [1, noop_max].""" + self.env.reset(**kwargs) + if self.override_num_noops is not None: + noops = self.override_num_noops + else: + noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101 + assert noops > 0 + obs = None + for _ in range(noops): + obs, _, done, _ = self.env.step(self.noop_action) + if done: + obs = self.env.reset(**kwargs) + return obs + + def step(self, ac): + return self.env.step(ac) + + +class FireResetEnv(gym.Wrapper): + def __init__(self, env): + """Take action on reset for environments that are fixed until firing.""" + gym.Wrapper.__init__(self, env) + assert env.unwrapped.get_action_meanings()[1] == 'FIRE' + assert len(env.unwrapped.get_action_meanings()) >= 3 + + def reset(self, **kwargs): + self.env.reset(**kwargs) + obs, _, done, _ = self.env.step(1) + if done: + self.env.reset(**kwargs) + obs, _, done, _ = self.env.step(2) + if done: + self.env.reset(**kwargs) + return obs + + def step(self, ac): + return self.env.step(ac) + + +class EpisodicLifeEnv(gym.Wrapper): + def __init__(self, env): + """Make end-of-life == end-of-episode, but only reset on true game over. + Done by DeepMind for the DQN and co. since it helps value estimation. + """ + gym.Wrapper.__init__(self, env) + self.lives = 0 + self.was_real_done = True + + def step(self, action): + obs, reward, done, info = self.env.step(action) + self.was_real_done = done + # check current lives, make loss of life terminal, + # then update lives to handle bonus lives + lives = self.env.unwrapped.ale.lives() + if lives < self.lives and lives > 0: + # for Qbert sometimes we stay in lives == 0 condtion for a few frames + # so its important to keep lives > 0, so that we only reset once + # the environment advertises done. + done = True + self.lives = lives + return obs, reward, done, info + + def reset(self, **kwargs): + """Reset only when lives are exhausted. + This way all states are still reachable even though lives are episodic, + and the learner need not know about any of this behind-the-scenes. 
+ """ + if self.was_real_done: + obs = self.env.reset(**kwargs) + else: + # no-op step to advance from terminal/lost life state + obs, _, _, _ = self.env.step(0) + self.lives = self.env.unwrapped.ale.lives() + return obs + + +class MaxAndSkipEnv(gym.Wrapper): + def __init__(self, env, skip=4): + """Return only every `skip`-th frame""" + gym.Wrapper.__init__(self, env) + # most recent raw observations (for max pooling across time steps) + self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype='uint8') + self._skip = skip + + def step(self, action): + """Repeat action, sum reward, and max over last observations.""" + total_reward = 0.0 + done = None + for i in range(self._skip): + obs, reward, done, info = self.env.step(action) + if i == self._skip - 2: + self._obs_buffer[0] = obs + if i == self._skip - 1: + self._obs_buffer[1] = obs + total_reward += reward + if done: + break + # Note that the observation on the done=True frame + # doesn't matter + max_frame = self._obs_buffer.max(axis=0) + + return max_frame, total_reward, done, info + + def reset(self, **kwargs): + return self.env.reset(**kwargs) + + +class ClipRewardEnv(gym.RewardWrapper): + def reward(self, reward): + """Bin reward to {+1, 0, -1} by its sign.""" + return np.sign(reward) + + +# class WarpFrame(gym.ObservationWrapper): +# def __init__(self, env): +# """Warp frames to 84x84 as done in the Nature paper and later work.""" +# gym.ObservationWrapper.__init__(self, env) +# self.width = 84 +# self.height = 84 +# self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 1)) + +# def _observation(self, frame): +# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) +# frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA) +# return frame[:, :, None] +class WarpFrame(gym.ObservationWrapper): + def __init__(self, env, width=84, height=84, grayscale=True): + """Warp frames to 84x84 as done in the Nature paper and later work.""" + gym.ObservationWrapper.__init__(self, env) + self.width = width + self.height = height + self.grayscale = grayscale + if self.grayscale: + self.observation_space = spaces.Box(low=0, high=255, + shape=(self.height, self.width, 1), dtype=np.uint8) + else: + self.observation_space = spaces.Box(low=0, high=255, + shape=(self.height, self.width, 3), dtype=np.uint8) + + def observation(self, frame): + if self.grayscale: + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA) + if self.grayscale: + frame = np.expand_dims(frame, -1) + return frame + + +class FrameStack(gym.Wrapper): + def __init__(self, env, k): + """Stack k last frames. + + Returns lazy array, which is much more memory efficient. + + See Also + -------- + baselines.common.atari_wrappers.LazyFrames + """ + gym.Wrapper.__init__(self, env) + self.k = k + self.frames = deque([], maxlen=k) + shp = env.observation_space.shape + self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k)) + + def reset(self): + ob = self.env.reset() + for _ in range(self.k): + self.frames.append(ob) + return self._get_ob() + + def step(self, action): + ob, reward, done, info = self.env.step(action) + self.frames.append(ob) + return self._get_ob(), reward, done, info + + def _get_ob(self): + assert len(self.frames) == self.k + return LazyFrames(list(self.frames)) + + +class LazyFrames: + def __init__(self, frames): + """This object ensures that common frames between the observations are only stored once. 
+ It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay + buffers. + + This object should only be converted to numpy array before being passed to the model. + + You'd not believe how complex the previous solution was.""" + self._frames = frames + + def __array__(self, dtype=None): + out = np.concatenate(self._frames, axis=2) + if dtype is not None: + out = out.astype(dtype) + return out + + +def make_atari(env_id): + env = gym.make(env_id) + assert 'NoFrameskip' in env.spec.id + env = NoopResetEnv(env, noop_max=30) + env = MaxAndSkipEnv(env, skip=4) + return env + + +def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False): + """Configure environment for DeepMind-style Atari. + """ + if episode_life: + env = EpisodicLifeEnv(env) + + if 'FIRE' in env.unwrapped.get_action_meanings(): + env = FireResetEnv(env) + env = WarpFrame(env) + + if clip_rewards: + env = ClipRewardEnv(env) + + if frame_stack: + env = FrameStack(env, 4) + + return env + + +class Monitor(gym.Wrapper): + def __init__(self, env, rank=0): + gym.Wrapper.__init__(self, env=env) + self.rank = rank + self.rewards = [] + self.total_reward = [] + self.summaries_dict = {'reward': 0, 'episode_length': 0, 'total_reward': 0, 'total_episode_length': 0} + env = self.env + while True: + if hasattr(env, 'was_real_done'): + self.episodic_env = env + if not hasattr(env, 'env'): + break + env = env.env + + def reset(self): + self.summaries_dict['reward'] = -1 + self.summaries_dict['episode_length'] = -1 + self.summaries_dict['total_reward'] = -1 + self.summaries_dict['total_episode_length'] = -1 + self.rewards = [] + env = self.env + if self.episodic_env.was_real_done: + self.summaries_dict['total_reward'] = -1 + self.summaries_dict['total_episode_length'] = -1 + self.total_reward = [] + return self.env.reset() + + def step(self, action): + observation, reward, done, info = self.env.step(action) + self.rewards.append(reward) + self.total_reward.append(reward) + if done: + # print("Done! R = %s, N = %s" % (sum(self.rewards), len(self.rewards))) + self.summaries_dict['reward'] = sum(self.rewards) + self.summaries_dict['episode_length'] = len(self.rewards) + + if self.episodic_env.was_real_done: + self.summaries_dict['total_reward'] = sum(self.total_reward) + self.summaries_dict['total_episode_length'] = len(self.total_reward) + info = self.summaries_dict.copy() # otherwise it will be overwritten + # if done: + # print("info:", info) + return observation, reward, done, info diff --git a/rl3/a2c/main.py b/rl3/a2c/main.py new file mode 100644 index 00000000..3bf85105 --- /dev/null +++ b/rl3/a2c/main.py @@ -0,0 +1,55 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +from subproc_vec_env import SubprocVecEnv +from atari_wrappers import make_atari, wrap_deepmind, Monitor + +from neural_network import CNN +from a2c import learn + +import os + +import gym +import argparse +import logging + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Mute missing instructions errors + +MODEL_PATH = 'models' +SEED = 0 + + +def get_args(): + # Get some basic command line arguements + parser = argparse.ArgumentParser() + parser.add_argument('-e', '--env', help='environment ID', default='BreakoutNoFrameskip-v4') + parser.add_argument('-s', '--steps', help='training steps', type=int, default=int(80e6)) + parser.add_argument('--nenv', help='No. 
of environments', type=int, default=16) + return parser.parse_args() + + +def train(env_id, num_timesteps, num_cpu): + def make_env(rank): + def _thunk(): + env = make_atari(env_id) + env.seed(SEED + rank) + gym.logger.setLevel(logging.WARN) + env = wrap_deepmind(env) + + # wrap the env one more time for getting total reward + env = Monitor(env, rank) + return env + return _thunk + + env = SubprocVecEnv([make_env(i) for i in range(num_cpu)]) + learn(CNN, env, SEED, total_timesteps=int(num_timesteps * 1.1)) + env.close() + pass + + +def main(): + args = get_args() + os.makedirs(MODEL_PATH, exist_ok=True) + train(args.env, args.steps, num_cpu=args.nenv) + + +if __name__ == "__main__": + main() diff --git a/rl3/a2c/neural_network.py b/rl3/a2c/neural_network.py new file mode 100644 index 00000000..f23db4eb --- /dev/null +++ b/rl3/a2c/neural_network.py @@ -0,0 +1,55 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import tensorflow as tf + + +def sample(logits): + noise = tf.random_uniform(tf.shape(logits)) + return tf.argmax(logits - tf.log(-tf.log(noise)), 1) + + +def conv(inputs, nf, ks, strides, gain=1.0): + return tf.layers.conv2d(inputs=inputs, filters=nf, kernel_size=ks, + strides=(strides, strides), activation=tf.nn.relu, + kernel_initializer=tf.orthogonal_initializer(gain=gain)) + + +def dense(inputs, n, act=tf.nn.relu, gain=1.0): + return tf.layers.dense(inputs=inputs, units=n, activation=act, + kernel_initializer=tf.orthogonal_initializer(gain)) + + +class CNN: + + def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, reuse=False): + gain = np.sqrt(2) + nbatch = nenv * nsteps + nh, nw, nc = ob_space.shape + ob_shape = (nbatch, nh, nw, nc * nstack) + X = tf.placeholder(tf.uint8, ob_shape) # obs + X_normal = tf.cast(X, tf.float32) / 255.0 + with tf.variable_scope("model", reuse=reuse): + h1 = conv(X_normal, 32, 8, 4, gain) + h2 = conv(h1, 64, 4, 2, gain) + h3 = conv(h2, 64, 3, 1, gain) + h3 = tf.layers.flatten(h3) + h4 = dense(h3, 512, gain=gain) + pi = dense(h4, ac_space.n, act=None) + vf = dense(h4, 1, act=None) + + v0 = vf[:, 0] + a0 = sample(pi) + # self.initial_state = [] # State reserved for LSTM + + def step(ob): + a, v = sess.run([a0, v0], {X: ob}) + return a, v#, [] # dummy state + + def value(ob): + return sess.run(v0, {X: ob}) + + self.X = X + self.pi = pi + self.vf = vf + self.step = step + self.value = value diff --git a/rl3/a2c/play.py b/rl3/a2c/play.py new file mode 100644 index 00000000..c35ae6bb --- /dev/null +++ b/rl3/a2c/play.py @@ -0,0 +1,56 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import argparse +import os +import numpy as np +from atari_wrappers import make_atari, wrap_deepmind, Monitor +from a2c import Agent +from neural_network import CNN +import imageio +import time + + +def get_args(): + # Get some basic command line arguements + parser = argparse.ArgumentParser() + parser.add_argument('-e', '--env', help='environment ID', default='BreakoutNoFrameskip-v4') + return parser.parse_args() + + +def get_agent(env, nsteps=5, nstack=1, total_timesteps=int(80e6), + vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, + epsilon=1e-5, alpha=0.99): + # Note: nstack=1 since frame_stack=True, during training frame_stack=False + agent = Agent(Network=CNN, ob_space=env.observation_space, + ac_space=env.action_space, nenvs=1, nsteps=nsteps, nstack=nstack, + ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, + lr=lr, alpha=alpha, epsilon=epsilon, 
total_timesteps=total_timesteps) + return agent + + +def main(): + env_id = get_args().env + env = make_atari(env_id) + env = wrap_deepmind(env, frame_stack=True, clip_rewards=False, episode_life=True) + env = Monitor(env) + # rewards will appear higher than during training since rewards are not clipped + + agent = get_agent(env) + + # check for save path + save_path = os.path.join('models', env_id + '.save') + agent.load(save_path) + + obs = env.reset() + renders = [] + while True: + obs = np.expand_dims(obs.__array__(), axis=0) + a, v = agent.step(obs) + obs, reward, done, info = env.step(a) + env.render() + if done: + print(info) + env.reset() + + +if __name__ == '__main__': + main() diff --git a/rl3/a2c/subproc_vec_env.py b/rl3/a2c/subproc_vec_env.py new file mode 100644 index 00000000..2c0a0808 --- /dev/null +++ b/rl3/a2c/subproc_vec_env.py @@ -0,0 +1,106 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +from multiprocessing import Process, Pipe + + +def worker(remote, parent_remote, env_fn_wrapper): + parent_remote.close() + env = env_fn_wrapper.x() + while True: + cmd, data = remote.recv() + if cmd == 'step': + ob, reward, done, info = env.step(data) + if done: + ob = env.reset() + remote.send((ob, reward, done, info)) + elif cmd == 'reset': + ob = env.reset() + remote.send(ob) + elif cmd == 'reset_task': + ob = env.reset_task() + remote.send(ob) + elif cmd == 'close': + remote.close() + break + elif cmd == 'get_spaces': + remote.send((env.action_space, env.observation_space)) + elif cmd == 'get_id': + remote.send(env.spec.id) + else: + raise NotImplementedError + + +class CloudpickleWrapper(): + """ + Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) + """ + + def __init__(self, x): + self.x = x + + def __getstate__(self): + import cloudpickle + return cloudpickle.dumps(self.x) + + def __setstate__(self, ob): + import pickle + self.x = pickle.loads(ob) + + +class SubprocVecEnv(): + def __init__(self, env_fns): + """ + envs: list of gym environments to run in subprocesses + """ + self.closed = False + nenvs = len(env_fns) + self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) + self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) + for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] + for p in self.ps: + p.daemon = True # if the main process crashes, we should not cause things to hang + p.start() + for remote in self.work_remotes: + remote.close() + + self.remotes[0].send(('get_spaces', None)) + self.action_space, self.observation_space = self.remotes[0].recv() + + self.remotes[0].send(('get_id', None)) + self.env_id = self.remotes[0].recv() + + def step(self, actions): + for remote, action in zip(self.remotes, actions): + remote.send(('step', action)) + results = [remote.recv() for remote in self.remotes] + obs, rews, dones, infos = zip(*results) + # print("Infos:", infos) + # for done, info in zip(dones, infos): + # if done: + # # print("Total reward:", info['reward'], "Num steps:", info['episode_length']) + # print("Returned info:", info, "Done:", done) + return np.stack(obs), np.stack(rews), np.stack(dones), infos + + def reset(self): + for remote in self.remotes: + remote.send(('reset', None)) + return np.stack([remote.recv() for remote in self.remotes]) + + def reset_task(self): + for remote in self.remotes: + remote.send(('reset_task', None)) + return np.stack([remote.recv() for remote in self.remotes]) + + 
def close(self): + if self.closed: + return + + for remote in self.remotes: + remote.send(('close', None)) + for p in self.ps: + p.join() + self.closed = True + + @property + def num_envs(self): + return len(self.remotes) diff --git a/rl3/ddpg.py b/rl3/ddpg.py new file mode 100644 index 00000000..3eb80d1c --- /dev/null +++ b/rl3/ddpg.py @@ -0,0 +1,323 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import tensorflow as tf +import gym +import matplotlib.pyplot as plt +from datetime import datetime + + +### avoid crashing on Mac +# doesn't seem to work +from sys import platform as sys_pf +if sys_pf == 'darwin': + import matplotlib + matplotlib.use("TkAgg") + + +# simple feedforward neural net +def ANN(x, layer_sizes, hidden_activation=tf.nn.relu, output_activation=None): + for h in layer_sizes[:-1]: + x = tf.layers.dense(x, units=h, activation=hidden_activation) + return tf.layers.dense(x, units=layer_sizes[-1], activation=output_activation) + + +# get all variables within a scope +def get_vars(scope): + return [x for x in tf.global_variables() if scope in x.name] + + +### Create both the actor and critic networks at once ### +### Q(s, mu(s)) returns the maximum Q for a given state s ### +def CreateNetworks( + s, a, + num_actions, + action_max, + hidden_sizes=(300,), + hidden_activation=tf.nn.relu, + output_activation=tf.tanh): + + with tf.variable_scope('mu'): + mu = action_max * ANN(s, list(hidden_sizes)+[num_actions], hidden_activation, output_activation) + with tf.variable_scope('q'): + input_ = tf.concat([s, a], axis=-1) # (state, action) + q = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) + with tf.variable_scope('q', reuse=True): + # reuse is True, so it reuses the weights from the previously defined Q network + input_ = tf.concat([s, mu], axis=-1) # (state, mu(state)) + q_mu = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) + return mu, q, q_mu + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros([size, act_dim], dtype=np.float32) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.float32) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + +### Implement the DDPG algorithm ### +def ddpg( + env_fn, + ac_kwargs=dict(), + seed=0, + save_folder=None, + num_train_episodes=100, + test_agent_every=25, + replay_size=int(1e6), + gamma=0.99, + decay=0.995, + mu_lr=1e-3, + q_lr=1e-3, + batch_size=100, + start_steps=10000, + action_noise=0.1, + max_episode_length=1000): + + tf.set_random_seed(seed) + np.random.seed(seed) + + env, test_env = env_fn(), env_fn() + + # comment out this line if you don't want to record a video of the agent + if save_folder is not None: + test_env = 
gym.wrappers.Monitor(test_env, save_folder) + + # get size of state space and action space + num_states = env.observation_space.shape[0] + num_actions = env.action_space.shape[0] + + # Maximum value of action + # Assumes both low and high values are the same + # Assumes all actions have the same bounds + # May NOT be the case for all environments + action_max = env.action_space.high[0] + + # Create Tensorflow placeholders (neural network inputs) + X = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # state + A = tf.placeholder(dtype=tf.float32, shape=(None, num_actions)) # action + X2 = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # next state + R = tf.placeholder(dtype=tf.float32, shape=(None,)) # reward + D = tf.placeholder(dtype=tf.float32, shape=(None,)) # done + + # Main network outputs + with tf.variable_scope('main'): + mu, q, q_mu = CreateNetworks(X, A, num_actions, action_max, **ac_kwargs) + + # Target networks + with tf.variable_scope('target'): + # We don't need the Q network output with arbitrary input action A + # because that's not actually used in our loss functions + # NOTE 1: The state input is X2, NOT X + # We only care about max_a{ Q(s', a) } + # Where this is equal to Q(s', mu(s')) + # This is because it's used in the target calculation: r + gamma * max_a{ Q(s',a) } + # Where s' = X2 + # NOTE 2: We ignore the first 2 networks for the same reason + _, _, q_mu_targ = CreateNetworks(X2, A, num_actions, action_max, **ac_kwargs) + + # Experience replay memory + replay_buffer = ReplayBuffer(obs_dim=num_states, act_dim=num_actions, size=replay_size) + + + # Target value for the Q-network loss + # We use stop_gradient to tell Tensorflow not to differentiate + # q_mu_targ wrt any params + # i.e. consider q_mu_targ values constant + q_target = tf.stop_gradient(R + gamma * (1 - D) * q_mu_targ) + + # DDPG losses + mu_loss = -tf.reduce_mean(q_mu) + q_loss = tf.reduce_mean((q - q_target)**2) + + # Train each network separately + mu_optimizer = tf.train.AdamOptimizer(learning_rate=mu_lr) + q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr) + mu_train_op = mu_optimizer.minimize(mu_loss, var_list=get_vars('main/mu')) + q_train_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q')) + + # Use soft updates to update the target networks + target_update = tf.group( + [tf.assign(v_targ, decay*v_targ + (1 - decay)*v_main) + for v_main, v_targ in zip(get_vars('main'), get_vars('target')) + ] + ) + + # Copy main network params to target networks + target_init = tf.group( + [tf.assign(v_targ, v_main) + for v_main, v_targ in zip(get_vars('main'), get_vars('target')) + ] + ) + + # boilerplate (and copy to the target networks!) 
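+  # (create the session, initialize all variables, then run target_init
+  #  so the target networks start out with the same weights as the main networks)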
+ sess = tf.Session() + sess.run(tf.global_variables_initializer()) + sess.run(target_init) + + def get_action(s, noise_scale): + a = sess.run(mu, feed_dict={X: s.reshape(1,-1)})[0] + a += noise_scale * np.random.randn(num_actions) + return np.clip(a, -action_max, action_max) + + test_returns = [] + def test_agent(num_episodes=5): + t0 = datetime.now() + n_steps = 0 + for j in range(num_episodes): + s, episode_return, episode_length, d = test_env.reset(), 0, 0, False + while not (d or (episode_length == max_episode_length)): + # Take deterministic actions at test time (noise_scale=0) + test_env.render() + s, r, d, _ = test_env.step(get_action(s, 0)) + episode_return += r + episode_length += 1 + n_steps += 1 + print('test return:', episode_return, 'episode_length:', episode_length) + test_returns.append(episode_return) + # print("test steps per sec:", n_steps / (datetime.now() - t0).total_seconds()) + + + # Main loop: play episode and train + returns = [] + q_losses = [] + mu_losses = [] + num_steps = 0 + for i_episode in range(num_train_episodes): + + # reset env + s, episode_return, episode_length, d = env.reset(), 0, 0, False + + while not (d or (episode_length == max_episode_length)): + # For the first `start_steps` steps, use randomly sampled actions + # in order to encourage exploration. + if num_steps > start_steps: + a = get_action(s, action_noise) + else: + a = env.action_space.sample() + + # Keep track of the number of steps done + num_steps += 1 + if num_steps == start_steps: + print("USING AGENT ACTIONS NOW") + + # Step the env + s2, r, d, _ = env.step(a) + episode_return += r + episode_length += 1 + + # Ignore the "done" signal if it comes from hitting the time + # horizon (that is, when it's an artificial terminal signal + # that isn't based on the agent's state) + d_store = False if episode_length == max_episode_length else d + + # Store experience to replay buffer + replay_buffer.store(s, a, r, s2, d_store) + + # Assign next state to be the current state on the next round + s = s2 + + # Perform the updates + for _ in range(episode_length): + batch = replay_buffer.sample_batch(batch_size) + feed_dict = { + X: batch['s'], + X2: batch['s2'], + A: batch['a'], + R: batch['r'], + D: batch['d'] + } + + # Q network update + # Note: plot the Q loss if you want + ql, _, _ = sess.run([q_loss, q, q_train_op], feed_dict) + q_losses.append(ql) + + # Policy update + # (And target networks update) + # Note: plot the mu loss if you want + mul, _, _ = sess.run([mu_loss, mu_train_op, target_update], feed_dict) + mu_losses.append(mul) + + print("Episode:", i_episode + 1, "Return:", episode_return, 'episode_length:', episode_length) + returns.append(episode_return) + + # Test the agent + if i_episode > 0 and i_episode % test_agent_every == 0: + test_agent() + + # on Mac, plotting results in an error, so just save the results for later + # if you're not on Mac, feel free to uncomment the below lines + np.savez('ddpg_results.npz', train=returns, test=test_returns, q_losses=q_losses, mu_losses=mu_losses) + + # plt.plot(returns) + # plt.plot(smooth(np.array(returns))) + # plt.title("Train returns") + # plt.show() + + # plt.plot(test_returns) + # plt.plot(smooth(np.array(test_returns))) + # plt.title("Test returns") + # plt.show() + + # plt.plot(q_losses) + # plt.title('q_losses') + # plt.show() + + # plt.plot(mu_losses) + # plt.title('mu_losses') + # plt.show() + + +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = 
float(x[start:(i+1)].sum()) / (i - start + 1) + return y + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + # parser.add_argument('--env', type=str, default='HalfCheetah-v2') + parser.add_argument('--env', type=str, default='Pendulum-v0') + parser.add_argument('--hidden_layer_sizes', type=int, default=300) + parser.add_argument('--num_layers', type=int, default=1) + parser.add_argument('--gamma', type=float, default=0.99) + parser.add_argument('--seed', type=int, default=0) + parser.add_argument('--num_train_episodes', type=int, default=200) + parser.add_argument('--save_folder', type=str, default='ddpg_monitor') + args = parser.parse_args() + + + ddpg( + lambda : gym.make(args.env), + ac_kwargs=dict(hidden_sizes=[args.hidden_layer_sizes]*args.num_layers), + gamma=args.gamma, + seed=args.seed, + save_folder=args.save_folder, + num_train_episodes=args.num_train_episodes, + ) diff --git a/rl3/es_flappy.py b/rl3/es_flappy.py new file mode 100644 index 00000000..6002ef78 --- /dev/null +++ b/rl3/es_flappy.py @@ -0,0 +1,252 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import matplotlib.pyplot as plt + +from datetime import datetime + +# import multiprocessing +# from multiprocessing.dummy import Pool + +# INSTRUCTIONS FOR INSTALLING PLE: +# https://pygame-learning-environment.readthedocs.io/en/latest/user/home.html +from ple import PLE +from ple.games.flappybird import FlappyBird + +import sys + + +# thread pool for parallelization +# pool = Pool(4) + + +HISTORY_LENGTH = 1 + + +class Env: + def __init__(self): + self.game = FlappyBird(pipe_gap=125) + self.env = PLE(self.game, fps=30, display_screen=False) + self.env.init() + self.env.getGameState = self.game.getGameState # maybe not necessary + + # by convention we want to use (0,1) + # but the game uses (None, 119) + self.action_map = self.env.getActionSet() #[None, 119] + + def step(self, action): + action = self.action_map[action] + reward = self.env.act(action) + done = self.env.game_over() + obs = self.get_observation() + # don't bother returning an info dictionary like gym + return obs, reward, done + + def reset(self): + self.env.reset_game() + return self.get_observation() + + def get_observation(self): + # game state returns a dictionary which describes + # the meaning of each value + # we only want the values + obs = self.env.getGameState() + return np.array(list(obs.values())) + + def set_display(self, boolean_value): + self.env.display_screen = boolean_value + + +# make a global environment to be used throughout the script +env = Env() + + +### neural network + +# hyperparameters +D = len(env.reset())*HISTORY_LENGTH +M = 50 +K = 2 + +def softmax(a): + c = np.max(a, axis=1, keepdims=True) + e = np.exp(a - c) + return e / e.sum(axis=-1, keepdims=True) + +def relu(x): + return x * (x > 0) + +class ANN: + def __init__(self, D, M, K, f=relu): + self.D = D + self.M = M + self.K = K + self.f = f + + def init(self): + D, M, K = self.D, self.M, self.K + self.W1 = np.random.randn(D, M) / np.sqrt(D) + # self.W1 = np.zeros((D, M)) + self.b1 = np.zeros(M) + self.W2 = np.random.randn(M, K) / np.sqrt(M) + # self.W2 = np.zeros((M, K)) + self.b2 = np.zeros(K) + + def forward(self, X): + Z = self.f(X.dot(self.W1) + self.b1) + return softmax(Z.dot(self.W2) + self.b2) + + def sample_action(self, x): + # assume input is a single state of size (D,) + # first make it (N, D) to fit ML conventions + X = np.atleast_2d(x) + P = self.forward(X) + p = P[0] # the first row 
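+        # note: argmax gives a deterministic (greedy) policy; the commented-out
+        # line below would instead sample stochastically from the softmax output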
+ # return np.random.choice(len(p), p=p) + return np.argmax(p) + + def get_params(self): + # return a flat array of parameters + return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2]) + + def get_params_dict(self): + return { + 'W1': self.W1, + 'b1': self.b1, + 'W2': self.W2, + 'b2': self.b2, + } + + def set_params(self, params): + # params is a flat list + # unflatten into individual weights + D, M, K = self.D, self.M, self.K + self.W1 = params[:D * M].reshape(D, M) + self.b1 = params[D * M:D * M + M] + self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K) + self.b2 = params[-K:] + + +def evolution_strategy( + f, + population_size, + sigma, + lr, + initial_params, + num_iters): + + # assume initial params is a 1-D array + num_params = len(initial_params) + reward_per_iteration = np.zeros(num_iters) + + params = initial_params + for t in range(num_iters): + t0 = datetime.now() + N = np.random.randn(population_size, num_params) + + ### slow way + R = np.zeros(population_size) # stores the reward + + # loop through each "offspring" + for j in range(population_size): + params_try = params + sigma*N[j] + R[j] = f(params_try) + + ### fast way + # R = pool.map(f, [params + sigma*N[j] for j in range(population_size)]) + # R = np.array(R) + + m = R.mean() + s = R.std() + if s == 0: + # we can't apply the following equation + print("Skipping") + continue + + A = (R - m) / s + reward_per_iteration[t] = m + params = params + lr/(population_size*sigma) * np.dot(N.T, A) + + # update the learning rate + lr *= 0.992354 + # sigma *= 0.99 + + print("Iter:", t, "Avg Reward: %.3f" % m, "Max:", R.max(), "Duration:", (datetime.now() - t0)) + + return params, reward_per_iteration + + +def reward_function(params): + model = ANN(D, M, K) + model.set_params(params) + + # play one episode and return the total reward + episode_reward = 0 + episode_length = 0 # not sure if it will be used + done = False + obs = env.reset() + obs_dim = len(obs) + if HISTORY_LENGTH > 1: + state = np.zeros(HISTORY_LENGTH*obs_dim) # current state + state[-obs_dim:] = obs + else: + state = obs + while not done: + # get the action + action = model.sample_action(state) + + # perform the action + obs, reward, done = env.step(action) + + # update total reward + episode_reward += reward + episode_length += 1 + + # update state + if HISTORY_LENGTH > 1: + state = np.roll(state, -obs_dim) + state[-obs_dim:] = obs + else: + state = obs + return episode_reward + + +if __name__ == '__main__': + model = ANN(D, M, K) + + if len(sys.argv) > 1 and sys.argv[1] == 'play': + # play with a saved model + j = np.load('es_flappy_results.npz') + best_params = np.concatenate([j['W1'].flatten(), j['b1'], j['W2'].flatten(), j['b2']]) + + # in case initial shapes are not correct + D, M = j['W1'].shape + K = len(j['b2']) + model.D, model.M, model.K = D, M, K + else: + # train and save + model.init() + params = model.get_params() + best_params, rewards = evolution_strategy( + f=reward_function, + population_size=30, + sigma=0.1, + lr=0.03, + initial_params=params, + num_iters=300, + ) + + # plot the rewards per iteration + # plt.plot(rewards) + # plt.show() + model.set_params(best_params) + np.savez( + 'es_flappy_results.npz', + train=rewards, + **model.get_params_dict(), + ) + + # play 5 test episodes + env.set_display(True) + for _ in range(5): + print("Test:", reward_function(best_params)) + diff --git a/rl3/es_mnist.py b/rl3/es_mnist.py new file mode 100644 index 00000000..5c1a5ec5 --- /dev/null +++ b/rl3/es_mnist.py @@ -0,0 +1,159 
@@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from datetime import datetime + +import multiprocessing +from multiprocessing.dummy import Pool + + +# thread pool for parallelization +pool = Pool(4) + +# get the data from: https://www.kaggle.com/c/digit-recognizer +# although you can feel free to use any dataset +df = pd.read_csv('../large_files/train.csv') + +# convert to numpy +data = df.values.astype(np.float32) + +# randomize and split the data +np.random.shuffle(data) + +X = data[:, 1:] / 255. +Y = data[:, 0].astype(np.int32) + +Xtrain = X[:-1000] +Ytrain = Y[:-1000] +Xtest = X[-1000:] +Ytest = Y[-1000:] +print("Finished loading in and splitting data") + +# layer sizes +D = Xtrain.shape[1] +M = 100 +K = len(set(Y)) + + +def softmax(a): + c = np.max(a, axis=1, keepdims=True) + e = np.exp(a - c) + return e / e.sum(axis=-1, keepdims=True) + + +def relu(x): + return x * (x > 0) + + +def log_likelihood(Y, P): + # assume Y is not one-hot encoded + N = len(Y) + return np.log(P[np.arange(N), Y]).mean() + + +class ANN: + def __init__(self, D, M, K): + self.D = D + self.M = M + self.K = K + + def init(self): + D, M, K = self.D, self.M, self.K + self.W1 = np.random.randn(D, M) / np.sqrt(D) + self.b1 = np.zeros(M) + self.W2 = np.random.randn(M, K) / np.sqrt(M) + self.b2 = np.zeros(K) + + def forward(self, X): + Z = np.tanh(X.dot(self.W1) + self.b1) + return softmax(Z.dot(self.W2) + self.b2) + + def score(self, X, Y): + P = np.argmax(self.forward(X), axis=1) + return np.mean(Y == P) + + def get_params(self): + # return a flat array of parameters + return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2]) + + def set_params(self, params): + # params is a flat list + # unflatten into individual weights + D, M, K = self.D, self.M, self.K + self.W1 = params[:D * M].reshape(D, M) + self.b1 = params[D * M:D * M + M] + self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K) + self.b2 = params[-K:] + + +def evolution_strategy( + f, + population_size, + sigma, + lr, + initial_params, + num_iters): + + # assume initial params is a 1-D array + num_params = len(initial_params) + reward_per_iteration = np.zeros(num_iters) + + params = initial_params + for t in range(num_iters): + t0 = datetime.now() + N = np.random.randn(population_size, num_params) + + # ### slow way + # R = np.zeros(population_size) # stores the reward + + # # loop through each "offspring" + # for j in range(population_size): + # params_try = params + sigma*N[j] + # R[j] = f(params_try) + + ### fast way + R = pool.map(f, [params + sigma*N[j] for j in range(population_size)]) + R = np.array(R) + + m = R.mean() + A = (R - m) / R.std() + reward_per_iteration[t] = m + params = params + lr/(population_size*sigma) * np.dot(N.T, A) + print("Iter:", t, "Avg Reward:", m, "Duration:", (datetime.now() - t0)) + + return params, reward_per_iteration + + +def reward_function(params): + model = ANN(D, M, K) + model.set_params(params) + # Ptrain = model.forward(Xtrain) + # return log_likelihood(Ytrain, Ptrain) + return model.score(Xtrain, Ytrain) + + + +if __name__ == '__main__': + model = ANN(D, M, K) + model.init() + params = model.get_params() + best_params, rewards = evolution_strategy( + f=reward_function, + population_size=50, + sigma=0.1, + lr=0.2, + initial_params=params, + num_iters=600, + ) + + # plot the rewards per iteration + plt.plot(rewards) + plt.show() + + # final train and test accuracy + 
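+  # the reward being maximized above is the training accuracy, so the held-out
+  # test score printed below is the check that the ES-found weights generalize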
model.set_params(best_params) + print("Train score:", model.score(Xtrain, Ytrain)) + print("Test score:", model.score(Xtest, Ytest)) + diff --git a/rl3/es_mujoco.py b/rl3/es_mujoco.py new file mode 100644 index 00000000..ce43f983 --- /dev/null +++ b/rl3/es_mujoco.py @@ -0,0 +1,204 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import matplotlib.pyplot as plt + +from datetime import datetime + +import multiprocessing +from multiprocessing.dummy import Pool + +import gym +import sys + + +# environment +ENV_NAME = 'HalfCheetah-v2' + + +# thread pool for parallelization +pool = Pool(4) + + +### neural network + +# hyperparameters +env = gym.make(ENV_NAME) +D = len(env.reset()) +M = 300 +K = env.action_space.shape[0] +action_max = env.action_space.high[0] + + +def relu(x): + return x * (x > 0) + +# def output_activation(x): +# return action_max * np.tanh(x) + +class ANN: + def __init__(self, D, M, K, f=relu): + self.D = D + self.M = M + self.K = K + self.f = f + + def init(self): + D, M, K = self.D, self.M, self.K + self.W1 = np.random.randn(D, M) / np.sqrt(D) + # self.W1 = np.zeros((D, M)) + self.b1 = np.zeros(M) + self.W2 = np.random.randn(M, K) / np.sqrt(M) + # self.W2 = np.zeros((M, K)) + self.b2 = np.zeros(K) + + def forward(self, X): + Z = self.f(X.dot(self.W1) + self.b1) + return np.tanh(Z.dot(self.W2) + self.b2) * action_max + + def sample_action(self, x): + # assume input is a single state of size (D,) + # first make it (N, D) to fit ML conventions + X = np.atleast_2d(x) + Y = self.forward(X) + return Y[0] # the first row + + def get_params(self): + # return a flat array of parameters + return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2]) + + def get_params_dict(self): + return { + 'W1': self.W1, + 'b1': self.b1, + 'W2': self.W2, + 'b2': self.b2, + } + + def set_params(self, params): + # params is a flat list + # unflatten into individual weights + D, M, K = self.D, self.M, self.K + self.W1 = params[:D * M].reshape(D, M) + self.b1 = params[D * M:D * M + M] + self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K) + self.b2 = params[-K:] + + +def evolution_strategy( + f, + population_size, + sigma, + lr, + initial_params, + num_iters): + + # assume initial params is a 1-D array + num_params = len(initial_params) + reward_per_iteration = np.zeros(num_iters) + + params = initial_params + for t in range(num_iters): + t0 = datetime.now() + N = np.random.randn(population_size, num_params) + + # ### slow way + # R = np.zeros(population_size) # stores the reward + + # # loop through each "offspring" + # for j in range(population_size): + # params_try = params + sigma*N[j] + # R[j] = f(params_try) + + ### fast way + R = pool.map(f, [params + sigma*N[j] for j in range(population_size)]) + R = np.array(R) + + m = R.mean() + s = R.std() + if s == 0: + # we can't apply the following equation + print("Skipping") + continue + + A = (R - m) / s + reward_per_iteration[t] = m + params = params + lr/(population_size*sigma) * np.dot(N.T, A) + + # update the learning rate + # lr *= 0.992354 + # sigma *= 0.99 + + print("Iter:", t, "Avg Reward: %.3f" % m, "Max:", R.max(), "Duration:", (datetime.now() - t0)) + + return params, reward_per_iteration + + +def reward_function(params, display=False): + model = ANN(D, M, K) + model.set_params(params) + + env = gym.make(ENV_NAME) + if display: + env = gym.wrappers.Monitor(env, 'es_monitor') + + # play one episode and return the total reward + episode_reward = 0 + episode_length = 0 # 
not sure if it will be used + done = False + state = env.reset() + while not done: + # display the env + if display: + env.render() + + # get the action + action = model.sample_action(state) + + # perform the action + state, reward, done, _ = env.step(action) + + # update total reward + episode_reward += reward + episode_length += 1 + + return episode_reward + + +if __name__ == '__main__': + model = ANN(D, M, K) + + if len(sys.argv) > 1 and sys.argv[1] == 'play': + # play with a saved model + j = np.load('es_mujoco_results.npz') + best_params = np.concatenate([j['W1'].flatten(), j['b1'], j['W2'].flatten(), j['b2']]) + + # in case initial shapes are not correct + D, M = j['W1'].shape + K = len(j['b2']) + model.D, model.M, model.K = D, M, K + else: + # train and save + model.init() + params = model.get_params() + best_params, rewards = evolution_strategy( + f=reward_function, + population_size=30, + sigma=0.1, + lr=0.03, + initial_params=params, + num_iters=300, + ) + + # plot the rewards per iteration + # plt.plot(rewards) + # plt.show() + model.set_params(best_params) + np.savez( + 'es_mujoco_results.npz', + train=rewards, + **model.get_params_dict(), + ) + + # play test episode + print("Test:", reward_function(best_params, display=True)) + diff --git a/rl3/es_simple.py b/rl3/es_simple.py new file mode 100644 index 00000000..0e9739d9 --- /dev/null +++ b/rl3/es_simple.py @@ -0,0 +1,59 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import matplotlib.pyplot as plt + + +def evolution_strategy( + f, + population_size, + sigma, + lr, + initial_params, + num_iters): + + # assume initial params is a 1-D array + num_params = len(initial_params) + reward_per_iteration = np.zeros(num_iters) + + params = initial_params + for t in range(num_iters): + N = np.random.randn(population_size, num_params) + R = np.zeros(population_size) # stores the reward + + # loop through each "offspring" + for j in range(population_size): + params_try = params + sigma*N[j] + R[j] = f(params_try) + + m = R.mean() + A = (R - m) / R.std() + reward_per_iteration[t] = m + params = params + lr/(population_size*sigma) * np.dot(N.T, A) + + return params, reward_per_iteration + + +def reward_function(params): + x0 = params[0] + x1 = params[1] + x2 = params[2] + return -(x0**2 + 0.1*(x1 - 1)**2 + 0.5*(x2 + 2)**2) + + +if __name__ == '__main__': + best_params, rewards = evolution_strategy( + f=reward_function, + population_size=50, + sigma=0.1, + lr=1e-3, + initial_params=np.random.randn(3), + num_iters=500, + ) + + # plot the rewards per iteration + plt.plot(rewards) + plt.show() + + # final params + print("Final params:", best_params) + diff --git a/rl3/extra_reading.txt b/rl3/extra_reading.txt new file mode 100644 index 00000000..b190c6ac --- /dev/null +++ b/rl3/extra_reading.txt @@ -0,0 +1,22 @@ +How do I sample from a discrete (categorical) distribution in log space? 
+https://stats.stackexchange.com/questions/64081/how-do-i-sample-from-a-discrete-categorical-distribution-in-log-space + +A2C (Advantage Actor-Critic) +https://openai.com/blog/baselines-acktr-a2c/ + +DDPG (Deep Deterministic Policy Gradient) +"Continuous control with deep reinforcement learning" +https://arxiv.org/abs/1509.02971 + +Deterministic Policy Gradient Algorithms +http://proceedings.mlr.press/v32/silver14.pdf + +ES (Evolution Strategies) +"Evolution Strategies as a Scalable Alternative to Reinforcement Learning" +https://arxiv.org/abs/1703.03864 + +Trust Region Evolution Strategies +https://www.microsoft.com/en-us/research/uploads/prod/2018/11/trust-region-evolution-strategies.pdf + +Addressing Function Approximation Error in Actor-Critic Methods +https://arxiv.org/abs/1802.09477 \ No newline at end of file diff --git a/rl3/flappy2envs.py b/rl3/flappy2envs.py new file mode 100644 index 00000000..2ce2bf5d --- /dev/null +++ b/rl3/flappy2envs.py @@ -0,0 +1,184 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import matplotlib.pyplot as plt + +from datetime import datetime + +from ple import PLE +from ple.games.flappybird import FlappyBird + +import sys + +from threading import Thread + + + +HISTORY_LENGTH = 1 + + +class Env: + def __init__(self): + self.game = FlappyBird(pipe_gap=125) + self.env = PLE(self.game, fps=30, display_screen=True) + self.env.init() + self.env.getGameState = self.game.getGameState # maybe not necessary + + # by convention we want to use (0,1) + # but the game uses (None, 119) + self.action_map = self.env.getActionSet() #[None, 119] + + def step(self, action): + action = self.action_map[action] + reward = self.env.act(action) + done = self.env.game_over() + obs = self.get_observation() + # don't bother returning an info dictionary like gym + return obs, reward, done + + def reset(self): + self.env.reset_game() + return self.get_observation() + + def get_observation(self): + # game state returns a dictionary which describes + # the meaning of each value + # we only want the values + obs = self.env.getGameState() + return np.array(list(obs.values())) + + def set_display(self, boolean_value): + self.env.display_screen = boolean_value + + +# make a global environment to be used throughout the script +env = Env() + + +### neural network + +# hyperparameters +D = len(env.reset())*HISTORY_LENGTH +M = 50 +K = 2 + +def softmax(a): + c = np.max(a, axis=1, keepdims=True) + e = np.exp(a - c) + return e / e.sum(axis=-1, keepdims=True) + +def relu(x): + return x * (x > 0) + +class ANN: + def __init__(self, D, M, K, f=relu): + self.D = D + self.M = M + self.K = K + self.f = f + + def init(self): + D, M, K = self.D, self.M, self.K + self.W1 = np.random.randn(D, M) / np.sqrt(D) + # self.W1 = np.zeros((D, M)) + self.b1 = np.zeros(M) + self.W2 = np.random.randn(M, K) / np.sqrt(M) + # self.W2 = np.zeros((M, K)) + self.b2 = np.zeros(K) + + def forward(self, X): + Z = self.f(X.dot(self.W1) + self.b1) + return softmax(Z.dot(self.W2) + self.b2) + + def sample_action(self, x): + # assume input is a single state of size (D,) + # first make it (N, D) to fit ML conventions + X = np.atleast_2d(x) + P = self.forward(X) + p = P[0] # the first row + # return np.random.choice(len(p), p=p) + return np.argmax(p) + + def score(self, X, Y): + P = np.argmax(self.forward(X), axis=1) + return np.mean(Y == P) + + def get_params(self): + # return a flat array of parameters + return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), 
self.b2]) + + def get_params_dict(self): + return { + 'W1': self.W1, + 'b1': self.b1, + 'W2': self.W2, + 'b2': self.b2, + } + + def set_params(self, params): + # params is a flat list + # unflatten into individual weights + D, M, K = self.D, self.M, self.K + self.W1 = params[:D * M].reshape(D, M) + self.b1 = params[D * M:D * M + M] + self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K) + self.b2 = params[-K:] + + + +env1, env2 = Env(), Env() + + + + +def reward_function(params, env): + model = ANN(D, M, K) + model.set_params(params) + + # play one episode and return the total reward + episode_reward = 0 + episode_length = 0 # not sure if it will be used + done = False + obs = env.reset() + obs_dim = len(obs) + if HISTORY_LENGTH > 1: + state = np.zeros(HISTORY_LENGTH*obs_dim) # current state + state[obs_dim:] = obs + else: + state = obs + while not done: + # get the action + action = model.sample_action(state) + + # perform the action + obs, reward, done = env.step(action) + + # update total reward + episode_reward += reward + episode_length += 1 + + # update state + if HISTORY_LENGTH > 1: + state = np.roll(state, -obs_dim) + state[-obs_dim:] = obs + else: + state = obs + print("Reward:", episode_reward) + + +if __name__ == '__main__': + + j = np.load('es_flappy_results.npz') + best_params = np.concatenate([j['W1'].flatten(), j['b1'], j['W2'].flatten(), j['b2']]) + + # in case D isn't correct + D, M = j['W1'].shape + K = len(j['b2']) + + t1 = Thread(target=reward_function, args=(best_params, env1)) + t2 = Thread(target=reward_function, args=(best_params, env2)) + t1.start() + t2.start() + t1.join() + t2.join() + + diff --git a/rl3/gym_review.py b/rl3/gym_review.py new file mode 100644 index 00000000..26733a58 --- /dev/null +++ b/rl3/gym_review.py @@ -0,0 +1,60 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import gym +import numpy as np +import matplotlib.pyplot as plt + + +def get_action(s, w): + return 1 if s.dot(w) > 0 else 0 + + +def play_one_episode(env, params): + observation = env.reset() + done = False + t = 0 + r = 0 + + while not done and t < 10000: + t += 1 + action = get_action(observation, params) + observation, reward, done, info = env.step(action) + r += reward + + return r + + +def play_multiple_episodes(env, T, params): + episode_rewards = np.empty(T) + + for i in range(T): + episode_rewards[i] = play_one_episode(env, params) + + avg_reward = episode_rewards.mean() + print("avg reward:", avg_reward) + return avg_reward + + +def random_search(env): + episode_rewards = [] + best = 0 + params = None + for t in range(100): + new_params = np.random.random(4)*2 - 1 + avg_reward = play_multiple_episodes(env, 100, new_params) + episode_rewards.append(avg_reward) + + if avg_reward > best: + params = new_params + best = avg_reward + return episode_rewards, params + + +if __name__ == '__main__': + env = gym.make('CartPole-v0') + episode_rewards, params = random_search(env) + plt.plot(episode_rewards) + plt.show() + + # play a final set of episodes + print("***Final run with final weights***") + play_multiple_episodes(env, 100, params) diff --git a/rl3/plot_ddpg_result.py b/rl3/plot_ddpg_result.py new file mode 100644 index 00000000..33e549db --- /dev/null +++ b/rl3/plot_ddpg_result.py @@ -0,0 +1,38 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import json +import matplotlib.pyplot as plt +import numpy as np + +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + 
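+    # y[i] is the mean of the last (up to) 100 entries of x ending at index i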
y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + +j = np.load('ddpg_results.npz') + +returns = j['train'] +test_returns = j['test'] +q_losses = j['q_losses'] +mu_losses = j['mu_losses'] + +plt.plot(returns) +plt.plot(smooth(np.array(returns))) +plt.title("Train returns") +plt.show() + +plt.plot(test_returns) +plt.plot(smooth(np.array(test_returns))) +plt.title("Test returns") +plt.show() + +plt.plot(q_losses) +plt.title('q_losses') +plt.show() + +plt.plot(mu_losses) +plt.title('mu_losses') +plt.show() \ No newline at end of file diff --git a/rl3/plot_es_flappy_results.py b/rl3/plot_es_flappy_results.py new file mode 100644 index 00000000..b0b725af --- /dev/null +++ b/rl3/plot_es_flappy_results.py @@ -0,0 +1,21 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import matplotlib.pyplot as plt +import numpy as np + +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + +j = np.load('es_flappy_results.npz') + +returns = j['train'] + +plt.plot(returns) +plt.plot(smooth(np.array(returns))) +plt.title("Train returns") +plt.show() \ No newline at end of file diff --git a/rl3/plot_es_mujoco_results.py b/rl3/plot_es_mujoco_results.py new file mode 100644 index 00000000..10a10617 --- /dev/null +++ b/rl3/plot_es_mujoco_results.py @@ -0,0 +1,21 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import matplotlib.pyplot as plt +import numpy as np + +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + +j = np.load('es_mujoco_results.npz') + +returns = j['train'] + +plt.plot(returns) +plt.plot(smooth(np.array(returns))) +plt.title("Train returns") +plt.show() \ No newline at end of file diff --git a/rl3/sample_test.py b/rl3/sample_test.py new file mode 100644 index 00000000..48d82f1d --- /dev/null +++ b/rl3/sample_test.py @@ -0,0 +1,16 @@ +# https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence +import numpy as np +import matplotlib.pyplot as plt + +logits = np.log([0.1, 0.2, 0.3, 0.4]) + +samples = [] + +for _ in range(10000): + noise = np.random.random(len(logits)) + sample = np.argmax(logits - np.log(-np.log(noise))) + samples.append(sample) + + +plt.hist(samples) +plt.show() \ No newline at end of file From 10ab7580a1934e3c5338726c3a11a169590a9a27 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 23 Jun 2019 14:42:54 -0400 Subject: [PATCH 142/329] update --- logistic_regression_class/l1_regularization.py | 11 +++++++++++ logistic_regression_class/logistic3.py | 1 - logistic_regression_class/logistic4.py | 1 - logistic_regression_class/logistic_donut.py | 1 - 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/logistic_regression_class/l1_regularization.py b/logistic_regression_class/l1_regularization.py index 86b79707..5bcbd809 100644 --- a/logistic_regression_class/l1_regularization.py +++ b/logistic_regression_class/l1_regularization.py @@ -9,6 +9,7 @@ import numpy as np +from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt def sigmoid(z): @@ -19,6 +20,7 @@ def sigmoid(z): # uniformly distributed numbers between -5, +5 X = (np.random.random((N, D)) - 0.5)*10 +# X = (np.random.randn(N, D) - 0.5)*10 # true weights - only the first 3 dimensions of X affect Y true_w = np.array([1, 0.5, -0.5] + [0]*(D - 3)) @@ -26,6 +28,15 @@ def sigmoid(z): # generate Y - 
add noise with variance 0.5 Y = np.round(sigmoid(X.dot(true_w) + np.random.randn(N)*0.5)) + + + +# let's plot the data to see what it looks like +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], X[:,2], c=Y) +plt.show() + # perform gradient descent to find w costs = [] # keep track of squared error cost w = np.random.randn(D) / np.sqrt(D) # randomly initialize w diff --git a/logistic_regression_class/logistic3.py b/logistic_regression_class/logistic3.py index 930d9d26..67fd027a 100644 --- a/logistic_regression_class/logistic3.py +++ b/logistic_regression_class/logistic3.py @@ -65,7 +65,6 @@ def cross_entropy(T, Y): print(cross_entropy(T, Y)) # gradient descent weight udpate - # w += learning_rate * np.dot((T - Y).T, Xb) # old w += learning_rate * Xb.T.dot(T - Y) # recalculate Y diff --git a/logistic_regression_class/logistic4.py b/logistic_regression_class/logistic4.py index 77d9d2cf..6fa77c36 100644 --- a/logistic_regression_class/logistic4.py +++ b/logistic_regression_class/logistic4.py @@ -64,7 +64,6 @@ def cross_entropy(T, Y): print(cross_entropy(T, Y)) # gradient descent weight udpate with regularization - # w += learning_rate * ( np.dot((T - Y).T, Xb) - 0.1*w ) # old w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w ) # recalculate Y diff --git a/logistic_regression_class/logistic_donut.py b/logistic_regression_class/logistic_donut.py index de5ec9fa..db8cc50a 100644 --- a/logistic_regression_class/logistic_donut.py +++ b/logistic_regression_class/logistic_donut.py @@ -73,7 +73,6 @@ def cross_entropy(T, Y): print(e) # gradient descent weight udpate with regularization - # w += learning_rate * ( np.dot((T - Y).T, Xb) - 0.01*w ) # old w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w ) # recalculate Y From 2102585724717a8dd9379d3a6832881acc3f4a1b Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 24 Jun 2019 01:19:54 -0400 Subject: [PATCH 143/329] update --- ann_class/extra_reading.txt | 8 ++++++++ hmm_class/extra_reading.txt | 8 ++++++++ 2 files changed, 16 insertions(+) create mode 100644 ann_class/extra_reading.txt create mode 100644 hmm_class/extra_reading.txt diff --git a/ann_class/extra_reading.txt b/ann_class/extra_reading.txt new file mode 100644 index 00000000..6233e9bf --- /dev/null +++ b/ann_class/extra_reading.txt @@ -0,0 +1,8 @@ +The Chain Rule of Calculus +http://tutorial.math.lamar.edu/Classes/CalcI/ChainRule.aspx + +Yes you should understand backprop by Andrej Karpathy +https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b + +The Matrix Cookbook +https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf \ No newline at end of file diff --git a/hmm_class/extra_reading.txt b/hmm_class/extra_reading.txt new file mode 100644 index 00000000..c0bb93d1 --- /dev/null +++ b/hmm_class/extra_reading.txt @@ -0,0 +1,8 @@ +A Tutorial on Hidden Markov Models and Selected Applications in Speech Recognition +https://www.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf + +Some Mathematics for HMM by Dawei Shen +https://pdfs.semanticscholar.org/4ce1/9ab0e07da9aa10be1c336400c8e4d8fc36c5.pdf + +A Revealing Introduction to Hidden Markov Models +https://www.cs.sjsu.edu/~stamp/RUA/HMM.pdf \ No newline at end of file From d77cfabeef58ac0d08912f0fb4c515b60a608a88 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 24 Jun 2019 01:21:21 -0400 Subject: [PATCH 144/329] update --- ann_class/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ann_class/extra_reading.txt 
b/ann_class/extra_reading.txt index 6233e9bf..3d4fde45 100644 --- a/ann_class/extra_reading.txt +++ b/ann_class/extra_reading.txt @@ -5,4 +5,7 @@ Yes you should understand backprop by Andrej Karpathy https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b The Matrix Cookbook -https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf \ No newline at end of file +https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf + +Rumelhart, D. E., Hinton, G. E., and Williams, R. J. (1986) Learning representations by back-propagating errors. +https://www.iro.umontreal.ca/~vincentp/ift3395/lectures/backprop_old.pdf \ No newline at end of file From f4e0c2c752c84b86136c348cd140e440240bd6fd Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 24 Jun 2019 01:22:54 -0400 Subject: [PATCH 145/329] update --- cnn_class/extra_reading.txt | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 cnn_class/extra_reading.txt diff --git a/cnn_class/extra_reading.txt b/cnn_class/extra_reading.txt new file mode 100644 index 00000000..7cf34890 --- /dev/null +++ b/cnn_class/extra_reading.txt @@ -0,0 +1,5 @@ +Gradient-Based Learning Applied to Document Recognition +http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf + +ImageNet Classification with Deep Convolutional Neural Networks +https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf \ No newline at end of file From 4e80198aa5a33fce5540d8c04b27f9c8a92dbcce Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 24 Jun 2019 01:28:44 -0400 Subject: [PATCH 146/329] update --- ab_testing/extra_reading.txt | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 ab_testing/extra_reading.txt diff --git a/ab_testing/extra_reading.txt b/ab_testing/extra_reading.txt new file mode 100644 index 00000000..668f896a --- /dev/null +++ b/ab_testing/extra_reading.txt @@ -0,0 +1,17 @@ +Algorithms for the multi-armed bandit problem +https://www.cs.mcgill.ca/~vkules/bandits.pdf + +UCB REVISITED: IMPROVED REGRET BOUNDS FOR THE STOCHASTIC MULTI-ARMED BANDIT PROBLEM +http://personal.unileoben.ac.at/rortner/Pubs/UCBRev.pdf + +Finite-time Analysis of the Multiarmed Bandit Problem +https://link.springer.com/article/10.1023/A:1013689704352 + +A Tutorial on Thompson Sampling +https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf + +An Empirical Evaluation of Thompson Sampling +https://papers.nips.cc/paper/4321-an-empirical-evaluation-of-thompson-sampling.pdf + +Analysis of Thompson Sampling for the Multi-armed Bandit Problem +http://proceedings.mlr.press/v23/agrawal12/agrawal12.pdf \ No newline at end of file From 1cdeb4621a5fb2da1041fd311b40e149c152bdf9 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 30 Jun 2019 16:22:25 -0400 Subject: [PATCH 147/329] update --- tf2.0/fake_util.py | 4 ++ tf2.0/moore.csv | 162 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 166 insertions(+) create mode 100644 tf2.0/fake_util.py create mode 100644 tf2.0/moore.csv diff --git a/tf2.0/fake_util.py b/tf2.0/fake_util.py new file mode 100644 index 00000000..49dd3a3f --- /dev/null +++ b/tf2.0/fake_util.py @@ -0,0 +1,4 @@ +# Used for an example only + +def my_useful_function(): + print("hello world") \ No newline at end of file diff --git a/tf2.0/moore.csv b/tf2.0/moore.csv new file mode 100644 index 00000000..ea97403f --- /dev/null +++ b/tf2.0/moore.csv @@ -0,0 +1,162 @@ +1971,2300 +1972,3500 +1973,2500 +1973,2500 +1974,4100 +1974,4500 +1974,8000 +1975,3510 +1976,5000 +1976,8500 +1976,6500 +1978,9000 +1978,29000 +1979,17500 
+1979,29000 +1979,68000 +1981,11500 +1982,55000 +1982,134000 +1983,22000 +1984,63000 +1984,190000 +1985,275000 +1985,25000 +1985,16000 +1986,110000 +1986,375000 +1986,30000 +1987,385000 +1987,730000 +1987,273000 +1987,553000 +1988,180000 +1988,250000 +1989,600000 +1989,1000000 +1989,1180235 +1989,310000 +1990,1200000 +1991,1350000 +1991,35000 +1992,600000 +1992,900000 +1993,2800000 +1993,3100000 +1994,578977 +1994,2500000 +1995,2500000 +1999,111000 +1995,5500000 +1996,4300000 +1997,10000000 +1997,7500000 +1997,8800000 +1998,7500000 +1999,9500000 +1999,13500000 +2000,21000000 +2000,21000000 +1999,27400000 +1999,21300000 +1999,22000000 +2000,42000000 +2001,191000000 +2001,45000000 +2002,55000000 +2004,112000000 +2004,400000000 +2005,169000000 +2006,184000000 +2005,228000000 +2006,362000000 +2007,540000000 +2008,47000000 +2003,54300000 +2003,105900000 +2002,220000000 +2005,165000000 +2005,250000000 +2006,291000000 +2007,169000000 +2003,410000000 +2008,600000000 +2009,760000000 +2011,1870000000 +2012,432000000 +2007,463000000 +2007,26000000 +2008,230000000 +2004,592000000 +2007,411000000 +2008,731000000 +2008,758000000 +2007,789000000 +2009,904000000 +2010,1000000000 +2012,2990000000 +2013,1000000000 +2011,1160000000 +2010,1170000000 +2010,1200000000 +2012,1200000000 +2012,1303000000 +2010,1400000000 +2012,1400000000 +2014,1400000000 +2006,1700000000 +2015,1750000000 +2013,1860000000 +2015,1900000000 +2008,1900000000 +2010,2000000000 +2014,2000000000 +2015,2000000000 +2015,3000000000 +2012,2100000000 +2011,2270000000 +2010,2300000000 +2014,2600000000 +2011,2600000000 +2012,2750000000 +2014,3000000000 +2016,3000000000 +2017,5300000000 +2017,5300000000 +2018,8500000000 +2012,3100000000 +2016,3200000000 +2016,3300000000 +2015,3990000000 +2013,4200000000 +2017,4300000000 +2014,4310000000 +2017,4800000000 +2017,4800000000 +2017,4800000000 +2012,5000000000 +2013,5000000000 +2014,5560000000 +2017,6100000000 +2018,6900000000 +2016,4000000000 +2018,6900000000 +2017,5500000000 +2018,5500000000 +2017,7000000000 +2015,7100000000 +2017,8000000000 +2016,7200000000 +2017,8000000000 +2016,8000000000 +2017,9700000000 +2017,250000000 +2015,10000000000 +2017,5450000000 +2018,10000000000 +2017,4300000000 +2017,18000000000 +2017,19200000000 +2018,8876000000 +2018,23600000000 +2018,9000000000 From 84133e9fef2e579e7a2a0a2a0faff565d51be888 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 9 Jul 2019 14:29:15 -0400 Subject: [PATCH 148/329] update --- nlp_class/spam2.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/nlp_class/spam2.py b/nlp_class/spam2.py index b5e069cc..bba2a72b 100644 --- a/nlp_class/spam2.py +++ b/nlp_class/spam2.py @@ -35,15 +35,19 @@ df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1}) Y = df['b_labels'].values +# split up the data +df_train, df_test, Ytrain, Ytest = train_test_split(df['data'], Y, test_size=0.33) + # try multiple ways of calculating features -# tfidf = TfidfVectorizer(decode_error='ignore') -# X = tfidf.fit_transform(df['data']) +tfidf = TfidfVectorizer(decode_error='ignore') +Xtrain = tfidf.fit_transform(df_train) +Xtest = tfidf.transform(df_test) + +# count_vectorizer = CountVectorizer(decode_error='ignore') +# Xtrain = count_vectorizer.fit_transform(df_train) +# Xtest = count_vectorizer.transform(df_test) -count_vectorizer = CountVectorizer(decode_error='ignore') -X = count_vectorizer.fit_transform(df['data']) -# split up the data -Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33) # create the model, train it, print 
scores model = MultinomialNB() From 755c3aa4416a75aaac9e4045c45d3b4a7d9bd9f0 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 22 Jul 2019 16:16:48 -0400 Subject: [PATCH 149/329] update --- cnn_class/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cnn_class/extra_reading.txt b/cnn_class/extra_reading.txt index 7cf34890..e0178415 100644 --- a/cnn_class/extra_reading.txt +++ b/cnn_class/extra_reading.txt @@ -2,4 +2,7 @@ Gradient-Based Learning Applied to Document Recognition http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf ImageNet Classification with Deep Convolutional Neural Networks -https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf \ No newline at end of file +https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf + +Convolution arithmetic tutorial +http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html \ No newline at end of file From c61ac1d976875435e1413005b10764fd350010eb Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 28 Jul 2019 16:11:35 -0400 Subject: [PATCH 150/329] update --- tf2.0/extra_reading.txt | 18 + tf2.0/sbux.csv | 1260 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 1278 insertions(+) create mode 100644 tf2.0/extra_reading.txt create mode 100644 tf2.0/sbux.csv diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt new file mode 100644 index 00000000..7d6d1ba3 --- /dev/null +++ b/tf2.0/extra_reading.txt @@ -0,0 +1,18 @@ +Deep learning improved by biological activation functions +https://arxiv.org/pdf/1804.11237.pdf + +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift +Sergey Ioffe, Christian Szegedy +https://arxiv.org/abs/1502.03167 + +Dropout: A Simple Way to Prevent Neural Networks from Overfitting +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf + +Convolution arithmetic tutorial +http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html + +On the Practical Computational Power of Finite Precision RNNs for Language Recognition +https://arxiv.org/abs/1805.04908 + +Massive Exploration of Neural Machine Translation Architectures +https://arxiv.org/abs/1703.03906 \ No newline at end of file diff --git a/tf2.0/sbux.csv b/tf2.0/sbux.csv new file mode 100644 index 00000000..05576b6e --- /dev/null +++ b/tf2.0/sbux.csv @@ -0,0 +1,1260 @@ +date,open,high,low,close,volume,Name +2013-02-08,27.92,28.325,27.92,28.185,7146296,SBUX +2013-02-11,28.26,28.26,27.93,28.07,5457354,SBUX +2013-02-12,28.0,28.275,27.975,28.13,8665592,SBUX +2013-02-13,28.23,28.23,27.75,27.915,7022056,SBUX +2013-02-14,27.765,27.905,27.675,27.775,8899188,SBUX +2013-02-15,27.805,27.85,27.085,27.17,18195730,SBUX +2013-02-19,27.18,27.305,27.01,27.225,11760912,SBUX +2013-02-20,27.3,27.42,26.59,26.655,12472506,SBUX +2013-02-21,26.535,26.82,26.26,26.675,13896450,SBUX +2013-02-22,26.85,27.105,26.64,27.085,11487316,SBUX +2013-02-25,27.2,27.355,26.6,26.605,12333954,SBUX +2013-02-26,26.715,26.93,26.425,26.64,10607724,SBUX +2013-02-27,26.625,27.4875,26.54,27.285,12056302,SBUX +2013-02-28,27.325,27.585,27.225,27.425,10394356,SBUX +2013-03-01,27.315,27.465,27.0,27.435,8451764,SBUX +2013-03-04,27.385,27.86,27.33,27.85,10193852,SBUX +2013-03-05,28.0,28.4,28.0,28.255,12931844,SBUX +2013-03-06,28.38,28.745,28.325,28.55,14925144,SBUX +2013-03-07,28.55,29.25,28.545,29.125,18237018,SBUX +2013-03-08,29.335,29.485,29.0725,29.335,14215718,SBUX 
+2013-03-11,29.2,29.465,29.165,29.305,9897766,SBUX +2013-03-12,29.225,29.275,28.99,29.14,11670100,SBUX +2013-03-13,29.165,29.4,29.14,29.2925,7435340,SBUX +2013-03-14,29.26,29.375,28.705,28.84,14723066,SBUX +2013-03-15,28.68,28.93,28.63,28.83,15102742,SBUX +2013-03-18,28.53,28.71,28.375,28.465,10521204,SBUX +2013-03-19,28.255,28.49,27.98,28.415,13337034,SBUX +2013-03-20,28.61,28.805,28.5,28.715,9620874,SBUX +2013-03-21,28.65,28.71,28.375,28.525,8307328,SBUX +2013-03-22,28.65,28.875,28.58,28.69,8720670,SBUX +2013-03-25,28.765,28.915,28.075,28.345,10580234,SBUX +2013-03-26,28.495,28.58,28.355,28.525,6128410,SBUX +2013-03-27,28.43,28.475,28.105,28.455,7456828,SBUX +2013-03-28,28.465,28.63,28.43,28.475,7620390,SBUX +2013-04-01,28.565,28.67,28.325,28.435,7009632,SBUX +2013-04-02,28.595,29.165,28.575,29.13,13495550,SBUX +2013-04-03,29.2,29.45,28.69,28.85,11272606,SBUX +2013-04-04,28.805,29.155,28.805,29.055,7568480,SBUX +2013-04-05,28.605,28.9382,28.3592,28.9,8993596,SBUX +2013-04-08,28.915,29.06,28.73,29.06,7343972,SBUX +2013-04-09,28.98,29.0,28.59,28.705,8361158,SBUX +2013-04-10,28.82,29.155,28.8045,28.9,8577388,SBUX +2013-04-11,28.975,29.495,28.9,29.2875,10416656,SBUX +2013-04-12,29.355,29.855,29.3,29.545,10418310,SBUX +2013-04-15,29.33,29.72,28.8,28.855,10700276,SBUX +2013-04-16,28.97,29.47,28.86,29.28,8849328,SBUX +2013-04-17,29.015,29.275,28.825,29.085,7207368,SBUX +2013-04-18,29.18,29.24,28.75,28.86,8776706,SBUX +2013-04-19,28.79,29.29,28.77,29.2025,8605504,SBUX +2013-04-22,29.21,29.435,28.99,29.32,5547302,SBUX +2013-04-23,29.42,29.95,29.3899,29.695,11323422,SBUX +2013-04-24,29.845,29.985,29.565,29.915,10410890,SBUX +2013-04-25,30.0,30.32,29.955,30.25,16550532,SBUX +2013-04-26,29.65,30.195,29.6,30.0,14970972,SBUX +2013-04-29,30.175,30.46,30.065,30.29,7400546,SBUX +2013-04-30,30.315,30.455,30.155,30.42,8041092,SBUX +2013-05-01,30.275,30.34,29.975,30.07,6294596,SBUX +2013-05-02,30.22,30.34,29.8,30.19,6237460,SBUX +2013-05-03,30.425,30.985,30.35,30.935,9835550,SBUX +2013-05-06,31.0,31.24,30.96,31.24,6997134,SBUX +2013-05-07,31.265,31.265,30.855,31.095,7495628,SBUX +2013-05-08,31.17,31.22,30.865,31.205,5507696,SBUX +2013-05-09,31.33,31.45,31.02,31.18,8276152,SBUX +2013-05-10,31.285,31.595,31.195,31.5485,7480820,SBUX +2013-05-13,31.6,31.615,31.305,31.41,5906892,SBUX +2013-05-14,31.46,31.785,31.39,31.76,7510580,SBUX +2013-05-15,31.6592,32.1,31.61,32.035,9654546,SBUX +2013-05-16,32.0,32.0325,31.725,31.775,6661036,SBUX +2013-05-17,31.885,32.07,31.75,32.065,6934282,SBUX +2013-05-20,32.015,32.305,31.89,31.915,7211790,SBUX +2013-05-21,32.02,32.23,31.94,32.125,6588946,SBUX +2013-05-22,32.075,32.465,31.98,32.075,11012050,SBUX +2013-05-23,31.845,31.91,31.515,31.76,8258348,SBUX +2013-05-24,31.54,31.7,31.305,31.68,6507242,SBUX +2013-05-28,32.105,32.35,32.02,32.13,7438516,SBUX +2013-05-29,31.895,32.05,31.5575,31.815,7560250,SBUX +2013-05-30,31.935,32.0221,31.7275,31.735,8798456,SBUX +2013-05-31,31.605,32.1725,31.395,31.57,10205308,SBUX +2013-06-03,31.63,31.77,31.31,31.73,11417578,SBUX +2013-06-04,31.305,32.07,31.305,31.665,8622564,SBUX +2013-06-05,31.52,31.65,31.155,31.17,8408538,SBUX +2013-06-06,31.175,31.5275,31.16,31.51,8625308,SBUX +2013-06-07,31.705,32.545,31.705,32.52,13752898,SBUX +2013-06-10,32.83,33.155,32.74,33.055,12412340,SBUX +2013-06-11,32.735,33.0525,32.655,32.71,7897496,SBUX +2013-06-12,32.91,33.0,32.13,32.225,10069920,SBUX +2013-06-13,32.27,33.095,32.035,32.985,11909482,SBUX +2013-06-14,32.975,33.335,32.725,32.8,9854434,SBUX +2013-06-17,33.095,33.28,32.8575,33.015,8115800,SBUX 
+2013-06-18,33.0,33.565,32.975,33.5475,7451352,SBUX +2013-06-19,33.51,33.74,33.205,33.205,10644698,SBUX +2013-06-20,32.91,33.16,32.52,32.61,11606540,SBUX +2013-06-21,32.855,32.96,32.115,32.345,14709502,SBUX +2013-06-24,32.0,32.289,31.59,32.005,9913028,SBUX +2013-06-25,32.265,32.68,32.22,32.37,9856852,SBUX +2013-06-26,32.685,33.3546,32.4455,32.9,9545442,SBUX +2013-06-27,33.135,33.25,32.825,32.845,6292594,SBUX +2013-06-28,32.755,33.125,32.7,32.755,10836506,SBUX +2013-07-01,33.045,33.245,32.91,33.12,6692040,SBUX +2013-07-02,33.12,33.6,33.04,33.395,6765528,SBUX +2013-07-03,33.215,33.84,33.2,33.65,5635188,SBUX +2013-07-05,33.935,34.0,33.45,33.86,5189244,SBUX +2013-07-08,33.97,34.41,33.94,34.145,8029920,SBUX +2013-07-09,34.45,34.5,33.995,34.065,7459914,SBUX +2013-07-10,33.935,34.06,33.725,34.05,8090792,SBUX +2013-07-11,34.34,34.7575,34.175,34.67,8385798,SBUX +2013-07-12,34.62,34.86,34.39,34.86,9706492,SBUX +2013-07-15,34.845,34.95,34.765,34.83,6180564,SBUX +2013-07-16,34.82,34.86,34.54,34.76,6068146,SBUX +2013-07-17,34.87,34.9076,33.945,34.1,12147310,SBUX +2013-07-18,34.225,34.505,34.02,34.24,8537754,SBUX +2013-07-19,34.285,34.555,34.2,34.395,7196954,SBUX +2013-07-22,34.54,34.605,34.275,34.51,6559682,SBUX +2013-07-23,34.62,34.66,33.725,33.83,8465332,SBUX +2013-07-24,34.06,34.215,33.15,33.305,14033776,SBUX +2013-07-25,33.47,34.15,33.365,34.085,15971984,SBUX +2013-07-26,36.3,36.76,35.925,36.68,32293248,SBUX +2013-07-29,36.375,36.54,36.05,36.225,12801986,SBUX +2013-07-30,36.45,36.49,35.855,35.965,9808846,SBUX +2013-07-31,36.07,36.25,35.61,35.6445,11248506,SBUX +2013-08-01,36.135,36.865,36.04,36.74,12849582,SBUX +2013-08-02,36.75,37.135,36.58,37.115,9394576,SBUX +2013-08-05,37.11,37.135,36.855,36.985,7606188,SBUX +2013-08-06,36.83,36.925,36.39,36.4,7956060,SBUX +2013-08-07,36.18,36.575,36.0825,36.095,7068922,SBUX +2013-08-08,36.385,36.7,36.2,36.47,5923772,SBUX +2013-08-09,36.295,36.6154,36.26,36.4,5366324,SBUX +2013-08-12,36.14,36.565,36.125,36.465,5473182,SBUX +2013-08-13,36.545,36.545,35.9674,36.32,5377794,SBUX +2013-08-14,36.29,36.29,35.895,35.925,4932052,SBUX +2013-08-15,35.655,35.675,35.155,35.37,7198878,SBUX +2013-08-16,35.49,35.695,35.275,35.355,6929830,SBUX +2013-08-19,35.29,35.6475,35.09,35.145,6349108,SBUX +2013-08-20,35.18,35.5125,35.0558,35.33,6046820,SBUX +2013-08-21,35.21,35.7,35.14,35.3565,6917088,SBUX +2013-08-22,35.54,36.13,35.505,35.95,5399676,SBUX +2013-08-23,36.0325,36.095,35.79,35.985,5869740,SBUX +2013-08-26,36.035,36.105,35.8,35.94,4928868,SBUX +2013-08-27,35.45,35.685,35.055,35.08,8021076,SBUX +2013-08-28,35.045,35.66,34.93,35.48,6249812,SBUX +2013-08-29,35.365,35.92,35.25,35.59,5509920,SBUX +2013-08-30,35.69,35.7,35.165,35.26,5666988,SBUX +2013-09-03,35.85,36.07,35.595,35.8,7249600,SBUX +2013-09-04,35.71,36.135,35.5455,36.07,6502234,SBUX +2013-09-05,36.125,36.38,36.005,36.025,4931220,SBUX +2013-09-06,36.185,36.325,35.465,35.785,6356670,SBUX +2013-09-09,35.84,36.245,35.835,36.22,4936828,SBUX +2013-09-10,36.49,37.16,36.465,37.1075,11779224,SBUX +2013-09-11,37.165,37.7499,37.165,37.695,9982644,SBUX +2013-09-12,37.75,37.955,37.5603,37.835,7628560,SBUX +2013-09-13,37.795,37.85,37.325,37.785,5767336,SBUX +2013-09-16,38.285,38.3,37.43,37.62,8787132,SBUX +2013-09-17,37.63,38.12,37.585,38.02,6661016,SBUX +2013-09-18,37.94,38.81,37.775,38.665,9488762,SBUX +2013-09-19,38.76,38.9225,38.15,38.175,8042062,SBUX +2013-09-20,38.13,38.5,38.035,38.06,9620500,SBUX +2013-09-23,38.055,38.19,37.535,37.68,6783882,SBUX +2013-09-24,37.765,38.655,37.739000000000004,38.275,7633976,SBUX 
+2013-09-25,38.42,38.505,38.0352,38.17,7261490,SBUX +2013-09-26,38.32,38.6395,38.16,38.59,4988814,SBUX +2013-09-27,38.455,38.74,38.2,38.665,6200278,SBUX +2013-09-30,38.25,38.5425,38.025,38.485,8828424,SBUX +2013-10-01,38.49,38.67,38.28,38.58,5205160,SBUX +2013-10-02,38.295,38.615,38.105,38.595,6088422,SBUX +2013-10-03,38.48,38.73,38.16,38.435,8164122,SBUX +2013-10-04,38.4,38.73,38.255,38.7,5226696,SBUX +2013-10-07,38.3,38.695,38.205,38.4305,5106784,SBUX +2013-10-08,38.555,39.015,37.72,37.765,12663250,SBUX +2013-10-09,37.695,37.9,37.2263,37.63,9482078,SBUX +2013-10-10,38.145,38.655,38.065,38.56,6638096,SBUX +2013-10-11,38.61,38.92,38.44,38.91,5530376,SBUX +2013-10-14,38.7675,39.16,38.625,39.05,5986662,SBUX +2013-10-15,38.915,38.9475,38.3,38.355,8146874,SBUX +2013-10-16,38.6,39.1125,38.445,39.02,10103938,SBUX +2013-10-17,38.835,39.425,38.725,39.3675,6562580,SBUX +2013-10-18,39.635,39.835,39.485,39.655,9049968,SBUX +2013-10-21,39.555,39.955,39.335,39.73,6770290,SBUX +2013-10-22,39.95,40.54,39.8,40.45,8419570,SBUX +2013-10-23,40.225,40.425,39.945,40.025,7908094,SBUX +2013-10-24,39.68,39.72,39.255,39.525,13508366,SBUX +2013-10-25,39.735,40.0,39.635,39.98,7251840,SBUX +2013-10-28,40.1,40.215,39.2625,39.355,11242732,SBUX +2013-10-29,39.605,39.81,39.525,39.81,7898436,SBUX +2013-10-30,40.315,40.425,39.88,40.415,17518716,SBUX +2013-10-31,39.345,40.81,39.2859,40.525,20491552,SBUX +2013-11-01,40.77,40.77,39.835,40.185,12005206,SBUX +2013-11-04,40.375,40.39,40.105,40.185,8285184,SBUX +2013-11-05,40.18,41.17,40.075,40.995,10092844,SBUX +2013-11-06,41.195,41.25,40.485,40.565,8562294,SBUX +2013-11-07,40.65,40.675,39.5,39.535,13283288,SBUX +2013-11-08,39.755,40.625,39.7,40.6,11487736,SBUX +2013-11-11,40.6,40.71,40.49,40.495,5146208,SBUX +2013-11-12,40.38,40.5249,40.06,40.3075,6613072,SBUX +2013-11-13,39.62,40.75,39.595,40.7305,12519058,SBUX +2013-11-14,40.765,40.77,40.3925,40.57,7647688,SBUX +2013-11-15,40.625,40.725,40.39,40.595,6445400,SBUX +2013-11-18,40.51,40.67,40.105,40.27,8322628,SBUX +2013-11-19,40.005,40.46,39.93,39.96,8725944,SBUX +2013-11-20,40.065,40.125,39.66,39.845,8734150,SBUX +2013-11-21,39.9,40.825,39.875,40.765,13139094,SBUX +2013-11-22,40.85,40.85,40.55,40.675,7926170,SBUX +2013-11-25,40.685,40.74,40.255,40.355,9001126,SBUX +2013-11-26,40.43,40.825,40.2025,40.755,8767864,SBUX +2013-11-27,40.695,40.955,40.6,40.81,4752000,SBUX +2013-11-29,40.925,41.185,40.685,40.73,4387144,SBUX +2013-12-02,40.745,40.8445,40.455,40.535,5773800,SBUX +2013-12-03,40.37,40.5775,40.145,40.275,7893264,SBUX +2013-12-04,40.195,40.36,39.72,39.75,10206454,SBUX +2013-12-05,39.78,40.125,39.665,39.86,6569194,SBUX +2013-12-06,40.2975,40.375,39.855,39.97,6736632,SBUX +2013-12-09,40.13,40.23,39.8025,39.865,7936162,SBUX +2013-12-10,39.495,39.535,38.34,38.69,26329504,SBUX +2013-12-11,38.8,38.9975,38.145,38.2,15751996,SBUX +2013-12-12,38.14,38.545,38.0,38.24,9651038,SBUX +2013-12-13,38.415,38.465,38.045,38.175,8052582,SBUX +2013-12-16,38.015,38.49,37.955,38.23,9348824,SBUX +2013-12-17,38.29,38.29,37.955,38.045,7008900,SBUX +2013-12-18,38.065,38.845,38.05,38.84,10268256,SBUX +2013-12-19,39.12,39.25,38.51,38.575,11406282,SBUX +2013-12-20,38.76,38.99,38.622,38.83,12707400,SBUX +2013-12-23,38.985,39.17,38.675,39.16,7624552,SBUX +2013-12-24,39.05,39.375,38.975,39.285,3896612,SBUX +2013-12-26,39.365,39.5125,39.2195,39.44,4391338,SBUX +2013-12-27,39.645,39.65,39.23,39.285,4506128,SBUX +2013-12-30,39.395,39.415,38.93,39.275,4734486,SBUX +2013-12-31,39.215,39.39,39.005,39.195,6016240,SBUX 
+2014-01-02,39.035,39.135,38.5025,38.585,8528022,SBUX +2014-01-03,38.74,38.885,38.47,38.475,6545626,SBUX +2014-01-06,38.45,38.675,38.005,38.085,10604900,SBUX +2014-01-07,38.325,38.705,38.235,38.605,8180398,SBUX +2014-01-08,38.64,39.0735,38.595,39.015,10190576,SBUX +2014-01-09,39.035,39.05,38.4,38.8,8370276,SBUX +2014-01-10,38.78,38.995,38.475,38.835,6475950,SBUX +2014-01-13,38.69,38.695,37.38,37.56,14730286,SBUX +2014-01-14,37.56,38.1775,37.375,37.73,18668208,SBUX +2014-01-15,37.77,38.165,37.705,38.095,8721564,SBUX +2014-01-16,37.91,38.0975,37.565,37.645,9110924,SBUX +2014-01-17,37.5,37.73,37.335,37.45,13007820,SBUX +2014-01-21,37.525,37.535,36.63,36.825,18777272,SBUX +2014-01-22,36.975,37.1,36.77,36.8,13989730,SBUX +2014-01-23,36.74,36.865,35.84,36.695,30451212,SBUX +2014-01-24,37.365,38.08,37.055,37.49,33218428,SBUX +2014-01-27,37.615,37.615,37.01,37.105,18212030,SBUX +2014-01-28,37.285,37.3975,36.835,36.945,11103896,SBUX +2014-01-29,36.75,36.83,35.73,35.78,16002076,SBUX +2014-01-30,36.14,36.215,35.655,35.955,15181598,SBUX +2014-01-31,35.445,35.9705,35.435,35.56,12287056,SBUX +2014-02-03,35.5,35.755,34.335,34.485,20353852,SBUX +2014-02-04,35.0,35.6,34.685,35.325,21066236,SBUX +2014-02-05,35.045,35.37,34.805,35.245,10968328,SBUX +2014-02-06,35.33,36.4,35.28,36.18,13819714,SBUX +2014-02-07,36.805,37.245,36.345,37.0175,14752770,SBUX +2014-02-10,37.155,37.59,37.155,37.4,12948390,SBUX +2014-02-11,37.435,37.595,36.895,37.25,16165852,SBUX +2014-02-12,37.245,37.53,36.85,36.955,9973052,SBUX +2014-02-13,36.78,37.345,36.695,37.345,8477256,SBUX +2014-02-14,37.175,37.545,37.005,37.515,8266438,SBUX +2014-02-18,37.495,37.5,36.9675,36.985,10967746,SBUX +2014-02-19,36.92,37.14,36.615,36.66,9802100,SBUX +2014-02-20,36.7,36.855,36.24,36.775,8553032,SBUX +2014-02-21,36.89,36.93,36.26,36.28,11298258,SBUX +2014-02-24,36.33,36.44,36.01,36.28,11953270,SBUX +2014-02-25,36.25,36.33,35.275,35.275,18641246,SBUX +2014-02-26,35.4,36.11,34.975,35.89,19156536,SBUX +2014-02-27,35.81,36.12,35.715,36.095,11298448,SBUX +2014-02-28,35.77,35.9663,35.25,35.48,22465506,SBUX +2014-03-03,35.005,35.365,35.0,35.235,12248352,SBUX +2014-03-04,35.715,35.98,35.525,35.83,12255104,SBUX +2014-03-05,35.995,36.045,35.44,35.65,9783410,SBUX +2014-03-06,35.915,36.6675,35.885,36.345,13781048,SBUX +2014-03-07,36.495,36.55,36.105,36.535,8409372,SBUX +2014-03-10,36.625,36.82,36.395,36.78,8648776,SBUX +2014-03-11,36.995,37.704,36.925,37.515,18368750,SBUX +2014-03-12,37.275,37.83,37.25,37.815,10450862,SBUX +2014-03-13,37.92,38.21,37.015,37.215,11380782,SBUX +2014-03-14,37.045,37.445,37.0118,37.135,8962602,SBUX +2014-03-17,37.405,37.495,36.91,37.09,11019894,SBUX +2014-03-18,37.175,37.42,37.025,37.3,5997520,SBUX +2014-03-19,37.5,38.665,37.41,37.955,24983140,SBUX +2014-03-20,38.145,38.565,37.8,38.4775,13851476,SBUX +2014-03-21,39.105,39.32,38.31,38.355,18036904,SBUX +2014-03-24,38.45,38.555,37.605,37.885,10018724,SBUX +2014-03-25,38.22,38.255,37.145,37.305,10568158,SBUX +2014-03-26,37.465,37.495,36.76,36.77,8371118,SBUX +2014-03-27,36.535,36.98,36.345,36.7,12675658,SBUX +2014-03-28,36.645,37.03,36.525,36.85,6582750,SBUX +2014-03-31,36.95,37.2,36.585,36.69,7721754,SBUX +2014-04-01,36.82,37.49,36.705,37.005,8741780,SBUX +2014-04-02,37.175,37.185,36.62,36.835,8275518,SBUX +2014-04-03,36.93,37.17,36.41,36.545,7165454,SBUX +2014-04-04,36.775,36.945,35.66,35.775,11708546,SBUX +2014-04-07,35.6,35.69,35.025,35.215,11089724,SBUX +2014-04-08,35.32,35.8475,35.25,35.74,10679090,SBUX +2014-04-09,35.815,36.34,35.6,36.24,9055586,SBUX 
+2014-04-10,36.37,36.37,35.085,35.11,13375470,SBUX +2014-04-11,34.895,35.065,34.34,34.365,16368844,SBUX +2014-04-14,34.815,35.02,34.35,34.655,12176480,SBUX +2014-04-15,34.98,35.05,33.965,34.445,15461760,SBUX +2014-04-16,34.825,35.44,34.58,35.395,11278644,SBUX +2014-04-17,35.285,35.695,35.07,35.075,9245966,SBUX +2014-04-21,35.155,35.3,34.925,35.24,6098442,SBUX +2014-04-22,35.265,35.6375,35.165,35.5745,8862690,SBUX +2014-04-23,35.73,35.735,35.11,35.195,8881548,SBUX +2014-04-24,35.76,36.02,35.055,35.545,15577016,SBUX +2014-04-25,36.0,36.125,35.47,35.725,18122130,SBUX +2014-04-28,35.945,36.01,34.9,35.465,10733556,SBUX +2014-04-29,35.59,35.725,35.1325,35.32,9830164,SBUX +2014-04-30,35.31,35.38,35.06,35.31,7283078,SBUX +2014-05-01,35.375,35.6,35.16,35.56,7919194,SBUX +2014-05-02,35.625,35.7625,35.285,35.3,8306490,SBUX +2014-05-05,35.25,35.5275,35.09,35.46,5486598,SBUX +2014-05-06,35.365,35.415,34.665,34.79,10926786,SBUX +2014-05-07,34.89,34.9975,34.57,34.87,10961996,SBUX +2014-05-08,34.8,35.2475,34.67,34.79,7531344,SBUX +2014-05-09,34.85,35.155,34.7612,35.145,6623136,SBUX +2014-05-12,35.32,35.62,35.2,35.575,7227204,SBUX +2014-05-13,35.485,35.67,35.45,35.58,5802916,SBUX +2014-05-14,35.59,35.6,35.015,35.085,8586022,SBUX +2014-05-15,35.13,35.165,34.64,34.925,9124686,SBUX +2014-05-16,35.0,35.52,34.905,35.47,9294826,SBUX +2014-05-19,35.28,35.56,35.14,35.51,6757810,SBUX +2014-05-20,35.38,35.448,34.9201,35.115,8699878,SBUX +2014-05-21,35.355,35.375,35.035,35.2,6089286,SBUX +2014-05-22,35.175,35.885,35.15,35.7,7359636,SBUX +2014-05-23,36.145,36.255,35.915,35.99,7166310,SBUX +2014-05-27,36.32,36.89,36.27,36.83,10100398,SBUX +2014-05-28,36.65,36.785,36.4575,36.635,8212030,SBUX +2014-05-29,36.76,36.78,36.325,36.555,6448878,SBUX +2014-05-30,36.58,36.75,36.265,36.62,6879534,SBUX +2014-06-02,36.61,37.0275,36.58,36.925,5926156,SBUX +2014-06-03,36.86,37.175,36.79,37.09,6768354,SBUX +2014-06-04,37.06,37.35,36.82,37.335,6495704,SBUX +2014-06-05,37.18,37.57,37.1,37.36,5188766,SBUX +2014-06-06,37.53,37.77,37.4,37.665,6204706,SBUX +2014-06-09,37.69,37.7,37.325,37.59,5701018,SBUX +2014-06-10,37.625,37.72,37.16,37.3,6640912,SBUX +2014-06-11,37.07,37.435,36.925,37.4,7510866,SBUX +2014-06-12,37.345,37.35,36.865,36.98,7193748,SBUX +2014-06-13,36.935,37.4475,36.765,37.345,7728298,SBUX +2014-06-16,37.24,37.58,37.23,37.545,6759676,SBUX +2014-06-17,37.515,37.825,37.495,37.655,5778600,SBUX +2014-06-18,37.6,37.8424,37.3,37.78,5786570,SBUX +2014-06-19,38.275,38.75,38.23,38.615,12555078,SBUX +2014-06-20,38.815,38.815,38.205,38.3,12446594,SBUX +2014-06-23,38.345,38.4,38.0955,38.365,4780230,SBUX +2014-06-24,38.45,38.875,38.27,38.715,10912302,SBUX +2014-06-25,38.52,39.09,38.48,39.06,7813834,SBUX +2014-06-26,39.005,39.165,38.735,39.03,6983266,SBUX +2014-06-27,38.98,39.175,38.895,38.97,8669268,SBUX +2014-06-30,39.05,39.095,38.54,38.69,9610248,SBUX +2014-07-01,38.945,39.1,38.8,39.04,8073888,SBUX +2014-07-02,39.19,39.2045,38.955,39.095,4794618,SBUX +2014-07-03,39.195,39.7,39.07,39.53,6662616,SBUX +2014-07-07,39.39,39.69,39.305,39.345,7443924,SBUX +2014-07-08,39.32,39.45,39.125,39.28,7802080,SBUX +2014-07-09,39.27,39.74,39.1801,39.725,7783444,SBUX +2014-07-10,39.305,39.525,39.095,39.425,4720762,SBUX +2014-07-11,39.48,39.495,39.07,39.3,4254768,SBUX +2014-07-14,39.49,39.49,39.21,39.28,4562120,SBUX +2014-07-15,39.325,39.575,39.23,39.445,8369728,SBUX +2014-07-16,39.53,39.53,39.155,39.365,8734258,SBUX +2014-07-17,39.06,39.23,38.56,38.62,8446350,SBUX +2014-07-18,38.85,39.075,38.625,38.97,6744520,SBUX 
+2014-07-21,38.93,38.965,38.585,38.805,5021858,SBUX +2014-07-22,39.165,39.515,39.1,39.37,6457244,SBUX +2014-07-23,39.255,39.6395,39.2,39.57,6450280,SBUX +2014-07-24,39.795,40.32,39.575,40.225,16129286,SBUX +2014-07-25,39.2,39.66,39.0,39.37,18984366,SBUX +2014-07-28,39.4,39.5,39.0865,39.18,8012924,SBUX +2014-07-29,39.245,39.625,39.205,39.325,7937248,SBUX +2014-07-30,39.5,39.59,39.26,39.45,8911708,SBUX +2014-07-31,39.26,39.35,38.76,38.84,8147584,SBUX +2014-08-01,38.75,38.945,38.29,38.49,7798186,SBUX +2014-08-04,38.63,38.88,38.515,38.765,6282674,SBUX +2014-08-05,38.585,38.73,38.215,38.395,6696400,SBUX +2014-08-06,38.25,38.72,38.135,38.565,5935218,SBUX +2014-08-07,38.665,38.72,38.28,38.355,5427964,SBUX +2014-08-08,38.315,38.835,38.255,38.81,5957988,SBUX +2014-08-11,38.835,39.125,38.825,38.935,4522400,SBUX +2014-08-12,38.935,39.0718,38.76,38.91,4723844,SBUX +2014-08-13,39.05,39.05,38.565,38.62,6935144,SBUX +2014-08-14,38.74,38.75,38.08,38.31,10892582,SBUX +2014-08-15,38.61,38.64,38.185,38.455,8090044,SBUX +2014-08-18,38.595,38.93,38.59,38.795,6845416,SBUX +2014-08-19,38.9,39.1,38.8,39.06,4919030,SBUX +2014-08-20,39.005,39.235,38.87,39.015,4668758,SBUX +2014-08-21,38.905,39.085,38.695,38.735,4811992,SBUX +2014-08-22,38.62,38.8,38.525,38.64,4552328,SBUX +2014-08-25,38.82,39.11,38.755,38.985,5698322,SBUX +2014-08-26,39.005,39.215,38.8775,38.895,5513856,SBUX +2014-08-27,38.955,39.135,38.83,38.96,4904018,SBUX +2014-08-28,38.895,39.015,38.755,38.905,3958032,SBUX +2014-08-29,39.035,39.05,38.76,38.905,4657356,SBUX +2014-09-02,38.85,38.99,38.58,38.74,6197468,SBUX +2014-09-03,38.78,38.965,38.35,38.395,6796736,SBUX +2014-09-04,38.4,38.6863,38.38,38.58,5749790,SBUX +2014-09-05,38.505,38.985,38.405,38.975,8026608,SBUX +2014-09-08,38.88,38.98,38.665,38.835,4492776,SBUX +2014-09-09,38.725,38.875,38.49,38.56,5114572,SBUX +2014-09-10,38.505,38.7,38.375,38.605,5918672,SBUX +2014-09-11,38.49,38.5,38.04,38.06,9365624,SBUX +2014-09-12,37.915,38.035,37.47,37.735,14619348,SBUX +2014-09-15,37.63,37.67,37.195,37.46,10845708,SBUX +2014-09-16,37.42,37.725,37.265,37.545,8041214,SBUX +2014-09-17,37.67,37.9105,37.5525,37.67,7475730,SBUX +2014-09-18,37.795,37.945,37.5,37.865,8028914,SBUX +2014-09-19,37.985,38.165,37.885,38.035,12040726,SBUX +2014-09-22,38.025,38.025,37.165,37.3,7859646,SBUX +2014-09-23,37.145,37.44,36.89,36.9775,7739386,SBUX +2014-09-24,37.265,37.715,37.175,37.66,8599714,SBUX +2014-09-25,37.59,37.64,37.045,37.06,8010814,SBUX +2014-09-26,37.0,37.615,36.97,37.585,7659794,SBUX +2014-09-29,37.275,37.76,37.225,37.635,6376526,SBUX +2014-09-30,37.78,37.925,37.51,37.73,7800904,SBUX +2014-10-01,37.84,37.955,37.205,37.305,8119064,SBUX +2014-10-02,37.205,37.5,36.89,37.225,8573520,SBUX +2014-10-03,37.4,38.05,37.385,37.945,8229168,SBUX +2014-10-06,38.01,38.115,37.55,37.5725,5221748,SBUX +2014-10-07,37.45,37.525,37.01,37.025,6383432,SBUX +2014-10-08,37.12,37.71,36.815,37.63,6716308,SBUX +2014-10-09,37.52,37.91,37.18,37.24,9343828,SBUX +2014-10-10,37.13,37.895,37.05,37.23,10495940,SBUX +2014-10-13,37.105,37.22,36.005,36.095,12431414,SBUX +2014-10-14,36.205,36.79,36.105,36.37,10995022,SBUX +2014-10-15,36.015,36.465,35.635,36.19,13622008,SBUX +2014-10-16,35.59,36.435,35.385,36.32,9738832,SBUX +2014-10-17,36.675,36.955,36.44,36.77,9581060,SBUX +2014-10-20,36.805,37.375,36.7025,37.35,7957028,SBUX +2014-10-21,37.5,37.5925,37.07,37.18,12312930,SBUX +2014-10-22,37.24,37.4925,37.08,37.3,6469670,SBUX +2014-10-23,37.575,37.755,37.38,37.42,6837730,SBUX +2014-10-24,37.45,37.955,37.175,37.905,7359034,SBUX 
+2014-10-27,38.005,38.19,37.91,37.985,5796276,SBUX +2014-10-28,38.17,38.535,37.94,38.525,8150988,SBUX +2014-10-29,38.55,38.8325,38.11,38.27,8794902,SBUX +2014-10-30,38.09,38.725,38.055,38.66,13388970,SBUX +2014-10-31,37.625,38.37,37.46,37.78,35889908,SBUX +2014-11-03,37.985,38.135,37.735,38.05,10606936,SBUX +2014-11-04,37.805,38.47,37.75,38.355,10420764,SBUX +2014-11-05,38.505,38.695,38.295,38.33,7403396,SBUX +2014-11-06,38.485,38.775,38.33,38.725,6167276,SBUX +2014-11-07,38.82,39.045,38.605,38.895,8248994,SBUX +2014-11-10,38.82,38.95,38.585,38.825,6555370,SBUX +2014-11-11,38.915,39.1,38.625,38.865,5201438,SBUX +2014-11-12,38.705,39.005,38.635,38.925,5312194,SBUX +2014-11-13,39.095,39.24,38.7059,38.945,6842248,SBUX +2014-11-14,39.04,39.15,38.865,39.06,6039282,SBUX +2014-11-17,38.97,39.235,38.8225,38.915,5899020,SBUX +2014-11-18,38.815,39.0,38.6575,38.785,5755816,SBUX +2014-11-19,38.72,39.04,38.7,38.91,5231186,SBUX +2014-11-20,38.83,39.32,38.825,39.1,6287096,SBUX +2014-11-21,39.565,39.96,39.385,39.88,14027718,SBUX +2014-11-24,39.95,40.41,39.855,40.26,9426200,SBUX +2014-11-25,40.28,40.46,40.025,40.105,8118386,SBUX +2014-11-26,40.06,40.155,39.7,39.85,6196752,SBUX +2014-11-28,40.215,40.82,40.205,40.605,6766674,SBUX +2014-12-01,40.5,40.75,40.31,40.425,8627478,SBUX +2014-12-02,40.345,40.425,40.045,40.185,7656398,SBUX +2014-12-03,40.21,40.375,40.035,40.235,11201396,SBUX +2014-12-04,40.3,41.15,40.195,40.655,14070910,SBUX +2014-12-05,41.25,41.96,41.215,41.785,14788168,SBUX +2014-12-08,42.1,42.1,41.55,41.9,11770652,SBUX +2014-12-09,41.535,41.645,41.1,41.515,8725620,SBUX +2014-12-10,41.45,42.02,41.275,41.33,10096320,SBUX +2014-12-11,41.45,42.01,41.3275,41.56,10058296,SBUX +2014-12-12,41.37,41.985,41.285,41.625,9164230,SBUX +2014-12-15,41.135,41.25,40.345,40.445,16286986,SBUX +2014-12-16,40.245,40.545,39.555,39.565,13981874,SBUX +2014-12-17,39.565,40.295,39.22,40.2175,10161624,SBUX +2014-12-18,40.775,40.82,39.63,40.015,17582942,SBUX +2014-12-19,40.1,40.145,39.705,39.72,17444212,SBUX +2014-12-22,39.925,40.275,39.885,40.27,7583856,SBUX +2014-12-23,40.75,41.025,40.565,40.715,8147722,SBUX +2014-12-24,40.735,40.93,40.59,40.635,2602398,SBUX +2014-12-26,40.715,41.2,40.69,40.915,4550364,SBUX +2014-12-29,40.83,41.275,40.75,41.19,4796662,SBUX +2014-12-30,41.035,41.44,40.895,40.895,5253454,SBUX +2014-12-31,41.095,41.665,41.0,41.025,7628772,SBUX +2015-01-02,41.065,41.4875,40.445,40.72,6906098,SBUX +2015-01-05,40.07,40.335,39.745,39.94,11623796,SBUX +2015-01-06,40.17,40.195,39.28,39.615,7664340,SBUX +2015-01-07,39.875,40.615,39.7,40.59,9732554,SBUX +2015-01-08,41.165,41.65,41.01,41.245,13170548,SBUX +2015-01-09,40.495,40.755,39.56,39.895,27556706,SBUX +2015-01-12,40.145,40.415,39.91,40.115,10021486,SBUX +2015-01-13,40.74,41.07,40.065,40.435,11040702,SBUX +2015-01-14,40.025,40.39,39.805,40.21,9295084,SBUX +2015-01-15,40.3,40.45,39.595,39.79,8126602,SBUX +2015-01-16,39.63,40.39,39.5,40.305,9015502,SBUX +2015-01-20,40.4,40.735,40.165,40.6125,10738304,SBUX +2015-01-21,40.525,40.815,40.265,40.645,10844182,SBUX +2015-01-22,40.68,41.42,40.445,41.37,23913056,SBUX +2015-01-23,43.25,44.35,43.22,44.11,38107194,SBUX +2015-01-26,44.045,44.25,43.705,44.06,14098574,SBUX +2015-01-27,43.89,44.605,43.725,44.17,10995808,SBUX +2015-01-28,44.35,44.795,43.745,43.7825,11963202,SBUX +2015-01-29,44.005,44.65,43.785,44.525,12475860,SBUX +2015-01-30,44.29,44.47,43.695,43.765,10070456,SBUX +2015-02-02,43.84,44.045,42.93,43.995,13638832,SBUX +2015-02-03,43.99,44.245,43.465,44.245,9252426,SBUX +2015-02-04,44.0,44.715,43.995,44.35,11496698,SBUX 
+2015-02-05,44.355,44.885,44.355,44.82,7598672,SBUX +2015-02-06,44.75,44.8375,44.34,44.5,7835332,SBUX +2015-02-09,44.255,44.6035,44.07,44.41,6911614,SBUX +2015-02-10,44.685,45.69,44.665,45.59,12469500,SBUX +2015-02-11,45.58,45.895,45.3525,45.395,6466910,SBUX +2015-02-12,45.545,45.97,45.045,45.9125,7197558,SBUX +2015-02-13,45.995,45.995,45.4525,45.79,6109522,SBUX +2015-02-17,45.885,46.12,45.63,46.015,6386900,SBUX +2015-02-18,46.195,46.665,46.0,46.5,6541986,SBUX +2015-02-19,46.575,46.89,46.505,46.585,6109176,SBUX +2015-02-20,46.73,46.835,46.485,46.755,6462662,SBUX +2015-02-23,46.925,46.964,46.54,46.79,5854572,SBUX +2015-02-24,46.63,46.995,46.58,46.725,6337888,SBUX +2015-02-25,46.65,47.415,46.625,47.13,8120660,SBUX +2015-02-26,47.15,47.4113,47.04,47.275,6816352,SBUX +2015-02-27,47.395,47.4,46.635,46.7425,8658404,SBUX +2015-03-02,46.665,47.275,46.665,47.1125,7947018,SBUX +2015-03-03,47.05,47.105,46.6,47.0,7578374,SBUX +2015-03-04,46.905,47.095,46.005,46.53,7774534,SBUX +2015-03-05,46.64,46.98,46.5303,46.815,5848750,SBUX +2015-03-06,46.65,46.8475,45.94,46.1075,6814414,SBUX +2015-03-09,46.195,46.565,46.045,46.52,5984880,SBUX +2015-03-10,46.08,46.4549,45.885,46.09,6076984,SBUX +2015-03-11,46.06,46.49,45.61,45.71,8185894,SBUX +2015-03-12,45.985,46.765,45.92,46.69,7295344,SBUX +2015-03-13,46.34,47.185,46.34,46.645,5835252,SBUX +2015-03-16,47.0,47.31,46.87,47.0225,6772606,SBUX +2015-03-17,46.88,47.24,46.8,47.1925,5508796,SBUX +2015-03-18,47.0,48.285,46.675,47.92,15429928,SBUX +2015-03-19,48.38,49.6,48.19,48.88,22020618,SBUX +2015-03-20,49.245,49.45,48.505,48.73,17360612,SBUX +2015-03-23,48.94,48.95,48.355,48.685,7985986,SBUX +2015-03-24,48.555,49.165,48.38,48.9575,7718488,SBUX +2015-03-25,49.07,49.24,47.885,47.885,9907170,SBUX +2015-03-26,47.675,47.875,46.83,47.54,10344304,SBUX +2015-03-27,47.5,47.975,47.375,47.535,7993350,SBUX +2015-03-30,48.02,48.25,47.75,47.99,6830270,SBUX +2015-03-31,47.835,48.1,47.345,47.35,8717754,SBUX +2015-04-01,47.14,47.25,46.28,46.51,14125350,SBUX +2015-04-02,46.71,47.3175,46.61,47.195,8863018,SBUX +2015-04-06,46.925,47.4,46.725,47.26,6058894,SBUX +2015-04-07,47.205,47.48,46.98,47.035,5354670,SBUX +2015-04-08,46.92,47.64,46.92,47.615,6827888,SBUX +2015-04-09,47.65,47.99,47.25,47.96,7109621,SBUX +2015-04-10,48.6,48.6,47.88,48.17,6643106,SBUX +2015-04-13,48.56,48.89,48.38,48.5,8171030,SBUX +2015-04-14,48.52,48.71,47.97,48.3,5952424,SBUX +2015-04-15,48.81,48.81,48.13,48.14,5162169,SBUX +2015-04-16,48.23,48.48,48.16,48.245,5312499,SBUX +2015-04-17,47.9,48.0,47.39,47.62,7539865,SBUX +2015-04-20,47.9,48.12,47.7,47.97,4868425,SBUX +2015-04-21,48.35,48.4799,48.02,48.37,6213360,SBUX +2015-04-22,48.5,48.6,47.98,48.335,7248119,SBUX +2015-04-23,48.55,49.7,48.28,49.43,15866051,SBUX +2015-04-24,51.32,52.09,50.62,51.84,22284881,SBUX +2015-04-27,51.81,51.94,50.76,50.87,11222608,SBUX +2015-04-28,50.6,50.8,49.9801,50.61,8882901,SBUX +2015-04-29,50.42,50.95,50.2,50.65,7161992,SBUX +2015-04-30,50.63,50.68,49.43,49.58,8492048,SBUX +2015-05-01,49.95,50.42,49.68,50.29,5916509,SBUX +2015-05-04,50.3,50.93,50.27,50.445,7493420,SBUX +2015-05-05,49.94,50.05,49.36,49.405,10691207,SBUX +2015-05-06,49.68,49.7,48.57,48.93,8033489,SBUX +2015-05-07,48.74,49.55,48.72,49.35,5681417,SBUX +2015-05-08,49.99,50.4265,49.46,49.78,6039840,SBUX +2015-05-11,49.71,50.22,49.38,49.5,5047180,SBUX +2015-05-12,49.15,49.99,49.0,49.71,5868552,SBUX +2015-05-13,49.85,50.15,49.425,49.59,4927094,SBUX +2015-05-14,49.98,50.59,49.67,50.555,7339742,SBUX +2015-05-15,50.79,50.85,50.39,50.8,6016694,SBUX 
+2015-05-18,50.65,51.29,50.56,51.18,8999761,SBUX +2015-05-19,51.48,51.715,50.96,51.42,6976052,SBUX +2015-05-20,51.31,51.44,50.43,51.03,5644662,SBUX +2015-05-21,50.94,51.45,50.78,51.33,5084042,SBUX +2015-05-22,51.33,51.65,51.21,51.48,5857672,SBUX +2015-05-26,51.38,51.78,50.66,50.84,7369923,SBUX +2015-05-27,51.04,51.7,50.91,51.59,6213573,SBUX +2015-05-28,51.84,51.94,51.445,51.81,5874382,SBUX +2015-05-29,51.95,52.23,51.45,51.96,9399112,SBUX +2015-06-01,51.96,52.46,51.67,52.22,7075082,SBUX +2015-06-02,51.98,52.3,51.66,51.73,7877799,SBUX +2015-06-03,52.0,52.27,51.67,52.12,5522702,SBUX +2015-06-04,51.87,52.18,51.57,51.72,6230805,SBUX +2015-06-05,51.57,52.44,51.27,52.19,7123248,SBUX +2015-06-08,52.0,52.23,51.49,51.53,6320181,SBUX +2015-06-09,51.35,51.7,51.1,51.54,5034038,SBUX +2015-06-10,51.8,52.86,51.66,52.69,8003611,SBUX +2015-06-11,52.81,53.0,52.44,52.49,6030167,SBUX +2015-06-12,52.41,52.74,52.16,52.63,5236747,SBUX +2015-06-15,52.23,52.46,52.01,52.27,5554964,SBUX +2015-06-16,52.27,53.14,52.2,52.965,6106529,SBUX +2015-06-17,53.09,53.47,52.72,53.24,6735294,SBUX +2015-06-18,53.5,54.28,53.4,54.11,10712142,SBUX +2015-06-19,54.08,54.44,53.84,53.93,10609714,SBUX +2015-06-22,54.325,54.43,53.88,53.9,7100665,SBUX +2015-06-23,54.04,54.18,53.72,54.115,5679984,SBUX +2015-06-24,53.75,53.97,53.5194,53.71,5524969,SBUX +2015-06-25,54.09,54.45,54.0,54.07,5389863,SBUX +2015-06-26,54.46,54.75,54.3,54.62,6637183,SBUX +2015-06-29,53.87,54.39,53.5144,53.55,6534077,SBUX +2015-06-30,54.15,54.3,53.14,53.615,9793969,SBUX +2015-07-01,53.86,54.21,53.6,53.89,6107698,SBUX +2015-07-02,54.03,54.7,53.9501,54.24,5684667,SBUX +2015-07-06,53.64,54.405,53.63,54.305,5396439,SBUX +2015-07-07,54.29,54.54,53.36,54.375,9462256,SBUX +2015-07-08,53.86,54.02,53.3101,53.39,8139303,SBUX +2015-07-09,54.07,54.5,53.88,54.05,7681013,SBUX +2015-07-10,54.54,54.732,54.18,54.57,8087800,SBUX +2015-07-13,55.0,55.89,54.9,55.7,7477170,SBUX +2015-07-14,55.96,56.06,55.52,55.75,7028025,SBUX +2015-07-15,55.92,55.95,55.285,55.34,8212573,SBUX +2015-07-16,56.06,56.16,55.66,55.74,7305520,SBUX +2015-07-17,55.9,55.9,55.37,55.69,8715431,SBUX +2015-07-20,55.73,56.74,55.7,56.21,8029646,SBUX +2015-07-21,56.38,56.47,55.78,56.2,6717452,SBUX +2015-07-22,56.43,56.87,56.25,56.69,6876059,SBUX +2015-07-23,56.98,57.0,56.16,56.56,12439229,SBUX +2015-07-24,59.12,59.31,57.15,57.29,14559687,SBUX +2015-07-27,57.21,57.47,56.85,56.98,8993523,SBUX +2015-07-28,57.39,57.4,56.56,57.14,8689215,SBUX +2015-07-29,57.38,57.8,57.14,57.51,8470512,SBUX +2015-07-30,57.3,58.15,57.01,58.06,7337948,SBUX +2015-07-31,58.44,58.44,57.73,57.93,6519528,SBUX +2015-08-03,58.62,58.96,58.0386,58.19,7664002,SBUX +2015-08-04,58.25,58.72,58.03,58.7,9113083,SBUX +2015-08-05,59.15,59.3198,58.83,59.01,7349063,SBUX +2015-08-06,59.13,59.2,57.09,57.23,11064470,SBUX +2015-08-07,57.33,57.36,56.51,57.2,7781995,SBUX +2015-08-10,57.29,57.626000000000005,55.75,56.27,12029150,SBUX +2015-08-11,55.79,56.435,55.24,56.35,8062553,SBUX +2015-08-12,55.69,56.4,54.95,56.38,10075571,SBUX +2015-08-13,56.52,57.25,56.51,56.85,6731474,SBUX +2015-08-14,56.95,57.12,56.66,57.1,4803903,SBUX +2015-08-17,57.0,57.76,56.73,57.74,5768362,SBUX +2015-08-18,57.96,58.06,57.66,57.83,5575441,SBUX +2015-08-19,57.58,58.08,57.115,57.59,6044193,SBUX +2015-08-20,57.0,57.15,55.77,55.81,7470885,SBUX +2015-08-21,54.72,54.86,52.601000000000006,52.84,20211503,SBUX +2015-08-24,48.05,52.67,42.05,50.34,27158813,SBUX +2015-08-25,52.96,53.61,51.05,51.09,19659002,SBUX +2015-08-26,52.99,54.15,51.27,53.96,15517591,SBUX +2015-08-27,54.74,56.21,54.41,55.95,15987923,SBUX 
+2015-08-28,55.84,56.31,55.2,55.63,7584826,SBUX +2015-08-31,55.23,55.47,54.5,54.71,7971204,SBUX +2015-09-01,52.82,54.36,52.74,53.5,13424932,SBUX +2015-09-02,54.47,55.29,53.751000000000005,55.26,9891071,SBUX +2015-09-03,55.72,55.76,54.475,54.69,7968868,SBUX +2015-09-04,53.87,54.57,53.84,54.28,6994267,SBUX +2015-09-08,55.31,55.45,54.53,55.21,8051710,SBUX +2015-09-09,55.9,56.0,54.57,54.69,8406656,SBUX +2015-09-10,54.34,55.69,54.33,55.37,8924778,SBUX +2015-09-11,55.19,56.54,55.03,56.53,8363110,SBUX +2015-09-14,56.54,56.91,56.05,56.29,5464463,SBUX +2015-09-15,56.43,57.21,56.115,56.91,6741341,SBUX +2015-09-16,56.83,57.35,56.21,57.26,6593362,SBUX +2015-09-17,57.32,58.1,57.04,57.28,7525349,SBUX +2015-09-18,56.49,57.63,56.28,56.84,16268035,SBUX +2015-09-21,57.2,57.84,56.96,57.54,6220131,SBUX +2015-09-22,56.85,57.25,56.7,57.12,8585093,SBUX +2015-09-23,57.16,57.93,57.05,57.79,6829205,SBUX +2015-09-24,57.38,58.54,57.17,58.37,10027330,SBUX +2015-09-25,58.92,58.96,57.74,57.99,10627026,SBUX +2015-09-28,58.01,58.43,55.6201,55.77,11548114,SBUX +2015-09-29,55.85,56.3,54.81,55.72,9392065,SBUX +2015-09-30,56.4,56.9,55.61,56.84,9799610,SBUX +2015-10-01,56.99,57.5,55.89,57.48,8497124,SBUX +2015-10-02,56.99,58.09,56.5406,58.08,9036765,SBUX +2015-10-05,58.49,59.18,58.07,59.04,8198998,SBUX +2015-10-06,58.82,59.14,58.22,58.69,5642949,SBUX +2015-10-07,58.62,58.83,57.9,58.78,8138313,SBUX +2015-10-08,58.78,59.71,58.39,59.46,6834836,SBUX +2015-10-09,59.47,60.11,59.3,60.07,7969884,SBUX +2015-10-12,60.35,60.89,60.04,60.54,6430301,SBUX +2015-10-13,60.34,60.745,60.0161,60.16,6262774,SBUX +2015-10-14,60.0,60.17,58.43,58.82,8365604,SBUX +2015-10-15,58.95,59.83,58.08,59.69,9745919,SBUX +2015-10-16,59.96,60.29,59.455,59.93,12860812,SBUX +2015-10-19,60.13,61.29,59.8745,60.97,8117213,SBUX +2015-10-20,61.22,61.36,60.56,60.88,6089097,SBUX +2015-10-21,61.07,61.12,60.16,60.53,6063923,SBUX +2015-10-22,60.96,61.7099,60.17,61.49,9182031,SBUX +2015-10-23,62.11,62.8,61.6201,62.61,8205994,SBUX +2015-10-26,62.98,63.84,62.97,63.43,9751716,SBUX +2015-10-27,63.37,63.41,62.19,62.71,8973243,SBUX +2015-10-28,63.11,63.52,62.42,63.51,9627260,SBUX +2015-10-29,63.42,63.5,61.713,62.5,14839093,SBUX +2015-10-30,63.69,64.0,62.26,62.57,16822302,SBUX +2015-11-02,63.01,63.1,62.12,62.24,8547237,SBUX +2015-11-03,62.0,62.975,61.65,62.8,8847718,SBUX +2015-11-04,63.0355,63.0355,61.34,61.96,9085091,SBUX +2015-11-05,62.17,62.46,62.01,62.28,6144979,SBUX +2015-11-06,62.05,62.24,61.61,61.97,6616305,SBUX +2015-11-09,61.75,61.97,60.86,61.34,6838326,SBUX +2015-11-10,61.54,62.32,61.21,62.18,6689040,SBUX +2015-11-11,62.55,62.57,61.81,61.87,4437315,SBUX +2015-11-12,61.34,61.65,60.75,61.07,6793779,SBUX +2015-11-13,60.89,61.345,59.61,59.74,8821593,SBUX +2015-11-16,59.5,60.69,59.5,60.68,8096603,SBUX +2015-11-17,60.95,61.56,60.435,60.55,6695251,SBUX +2015-11-18,60.66,61.865,60.33,61.8,7215255,SBUX +2015-11-19,61.8,61.93,61.39,61.46,5154366,SBUX +2015-11-20,61.96,62.15,61.58,61.99,8302476,SBUX +2015-11-23,62.14,63.19,62.1,62.64,8493485,SBUX +2015-11-24,62.06,62.37,61.2199,61.96,7908223,SBUX +2015-11-25,62.05,62.5,62.0,62.19,4549913,SBUX +2015-11-27,62.19,62.38,61.93,62.18,2447902,SBUX +2015-11-30,62.1,62.29,61.201,61.39,9863771,SBUX +2015-12-01,61.08,61.68,60.51,61.37,10910838,SBUX +2015-12-02,61.63,61.71,61.115,61.22,6587454,SBUX +2015-12-03,61.37,61.4468,59.15,59.55,12056103,SBUX +2015-12-04,59.86,61.87,59.6,61.75,9100588,SBUX +2015-12-07,61.75,61.95,61.44,61.89,5967809,SBUX +2015-12-08,61.69,62.43,61.52,62.16,6664947,SBUX 
+2015-12-09,61.71,62.538999999999994,60.82,61.18,8541573,SBUX +2015-12-10,61.13,62.14,61.01,61.87,6623896,SBUX +2015-12-11,60.86,61.19,59.6,59.82,11489255,SBUX +2015-12-14,60.04,60.14,58.61,59.92,13453719,SBUX +2015-12-15,60.55,60.68,59.97,59.98,7842073,SBUX +2015-12-16,60.32,60.5,59.51,60.35,9281835,SBUX +2015-12-17,60.66,60.83,59.47,59.515,9079430,SBUX +2015-12-18,59.2,59.5,58.27,58.62,18099462,SBUX +2015-12-21,58.89,59.615,58.66,59.54,7187470,SBUX +2015-12-22,59.94,60.07,59.275,59.99,6501424,SBUX +2015-12-23,60.26,60.37,59.96,60.34,4510229,SBUX +2015-12-24,60.37,60.51,60.17,60.32,2215418,SBUX +2015-12-28,60.02,60.33,59.58,60.19,4437236,SBUX +2015-12-29,60.46,61.32,60.35,61.13,5477335,SBUX +2015-12-30,61.22,61.4,60.75,60.82,3973912,SBUX +2015-12-31,60.65,60.81,60.02,60.03,4960875,SBUX +2016-01-04,58.77,58.83,57.6,58.26,13521544,SBUX +2016-01-05,58.79,58.79,57.98,58.65,9617778,SBUX +2016-01-06,57.7,58.53,57.64,58.13,8266322,SBUX +2016-01-07,56.88,57.91,56.16,56.69,11140877,SBUX +2016-01-08,57.41,57.73,56.53,56.63,10427021,SBUX +2016-01-11,57.0,58.12,56.78,57.82,10757313,SBUX +2016-01-12,58.39,59.53,58.18,59.46,12375826,SBUX +2016-01-13,59.8,60.0,57.8001,57.87,11303603,SBUX +2016-01-14,57.51,59.43,56.92100000000001,58.98,11444106,SBUX +2016-01-15,57.07,58.39,56.75,58.0,15246127,SBUX +2016-01-19,58.67,59.39,58.12,58.55,12288950,SBUX +2016-01-20,57.57,57.96,54.94,56.92,22786359,SBUX +2016-01-21,57.84,59.38,57.67,59.03,20888519,SBUX +2016-01-22,57.55,59.4,57.41,59.17,32820193,SBUX +2016-01-25,59.36,59.38,57.61,57.71,13554262,SBUX +2016-01-26,57.92,58.865,57.8,58.61,8898100,SBUX +2016-01-27,58.87,58.96,57.255,57.63,12491252,SBUX +2016-01-28,58.29,59.42,58.0,59.285,11832368,SBUX +2016-01-29,59.78,60.88,59.64,60.77,13224438,SBUX +2016-02-01,60.66,61.785,60.27,61.4,9529094,SBUX +2016-02-02,60.66,60.9,60.18,60.695,9407352,SBUX +2016-02-03,60.88,61.13,58.5,59.53,12254460,SBUX +2016-02-04,59.41,59.4487,57.99,58.29,13944926,SBUX +2016-02-05,58.1,58.2,54.25,54.49,24529008,SBUX +2016-02-08,53.09,54.47,52.63,54.14,21457492,SBUX +2016-02-09,53.19,55.2886,53.17,54.42,11605059,SBUX +2016-02-10,55.28,56.35,55.01,55.14,11663942,SBUX +2016-02-11,53.89,55.39,53.55,54.92,12106062,SBUX +2016-02-12,55.56,56.04,55.04,55.86,8680205,SBUX +2016-02-16,56.79,56.85,55.98,56.41,11594766,SBUX +2016-02-17,56.7,57.66,56.16,57.63,11955188,SBUX +2016-02-18,57.57,57.57,56.67,56.96,8493953,SBUX +2016-02-19,56.92,57.86,56.52,57.67,9033620,SBUX +2016-02-22,58.63,58.95,58.17,58.87,8390689,SBUX +2016-02-23,58.45,58.9,58.0,58.46,7064095,SBUX +2016-02-24,57.21,58.35,56.28,58.11,10780882,SBUX +2016-02-25,58.46,58.75,58.0,58.75,6262127,SBUX +2016-02-26,59.0,59.21,57.92,58.34,7473374,SBUX +2016-02-29,58.25,59.15,58.1,58.21,7645081,SBUX +2016-03-01,58.77,60.2,58.5,60.04,9183562,SBUX +2016-03-02,59.83,60.0,58.83,59.56,8856392,SBUX +2016-03-03,59.12,59.2,58.2,59.04,8262455,SBUX +2016-03-04,59.14,59.19,58.23,58.7,8344773,SBUX +2016-03-07,58.44,58.67,57.31,58.0,9204624,SBUX +2016-03-08,57.58,58.23,57.26,57.6,8127426,SBUX +2016-03-09,57.78,57.97,56.79,57.07,9734589,SBUX +2016-03-10,57.51,57.86,56.92,57.52,7023785,SBUX +2016-03-11,58.1,58.1,56.57,57.59,15497560,SBUX +2016-03-14,57.58,58.78,57.5,58.65,9053250,SBUX +2016-03-15,58.32,59.1566,58.17,59.08,7428469,SBUX +2016-03-16,58.65,59.82,58.65,59.67,8069427,SBUX +2016-03-17,59.47,59.98,59.37,59.55,7734658,SBUX +2016-03-18,59.91,60.45,59.4295,59.7,14313578,SBUX +2016-03-21,59.56,59.8609,59.015,59.1,6487185,SBUX +2016-03-22,59.0,59.55,58.57,59.38,8246837,SBUX 
+2016-03-23,59.14,59.395,58.69,58.83,5794495,SBUX +2016-03-24,58.7,58.79,58.28,58.36,5948307,SBUX +2016-03-28,58.56,59.47,58.4,58.96,5791603,SBUX +2016-03-29,58.82,59.735,58.82,59.55,6031947,SBUX +2016-03-30,60.0,60.26,59.51,60.01,5723499,SBUX +2016-03-31,59.77,60.21,59.68,59.7,5622834,SBUX +2016-04-01,59.61,61.17,59.41,61.02,9401126,SBUX +2016-04-04,61.1,61.1839,60.08,60.25,5799864,SBUX +2016-04-05,59.88,60.23,59.44,60.04,4994792,SBUX +2016-04-06,60.02,60.91,59.91,60.83,5667253,SBUX +2016-04-07,60.59,61.54,60.54,61.17,8239174,SBUX +2016-04-08,61.5,61.64,60.7,61.04,5064894,SBUX +2016-04-11,61.22,61.5,60.78,60.9,6103358,SBUX +2016-04-12,58.95,59.68,58.37,59.5,17565750,SBUX +2016-04-13,60.32,61.08,59.75,60.21,9898527,SBUX +2016-04-14,60.26,60.4,59.91,60.13,5157368,SBUX +2016-04-15,60.24,60.6246,60.01,60.51,5965310,SBUX +2016-04-18,60.69,61.07,60.355,60.89,7228573,SBUX +2016-04-19,61.16,61.25,60.48,60.9,7283570,SBUX +2016-04-20,61.04,61.43,60.85,60.9,5558770,SBUX +2016-04-21,60.9,61.1,60.48,60.64,12799083,SBUX +2016-04-22,59.01,59.1,57.03,57.68,29836693,SBUX +2016-04-25,57.62,57.96,57.58,57.77,8428038,SBUX +2016-04-26,58.05,58.67,57.56,57.72,8839067,SBUX +2016-04-27,57.51,57.65,56.62,56.9,12390767,SBUX +2016-04-28,56.59,57.36,56.32,56.42,9196560,SBUX +2016-04-29,56.02,56.43,55.29,56.23,12133364,SBUX +2016-05-02,56.29,57.37,56.11,57.36,8616189,SBUX +2016-05-03,56.7,57.059,56.14,56.25,7854260,SBUX +2016-05-04,55.98,56.65,55.8,56.39,6508507,SBUX +2016-05-05,56.37,56.77,56.01,56.25,6215367,SBUX +2016-05-06,55.96,56.32,55.38,56.31,6378690,SBUX +2016-05-09,56.32,56.93,56.23,56.64,5976229,SBUX +2016-05-10,56.85,57.6,56.71,57.49,7931185,SBUX +2016-05-11,57.13,57.39,56.09,56.23,8118920,SBUX +2016-05-12,56.57,56.79,55.82,56.3,6870281,SBUX +2016-05-13,56.43,56.6,55.73,55.82,5466041,SBUX +2016-05-16,55.7,55.77,55.2,55.53,9536192,SBUX +2016-05-17,55.38,55.64,54.51,54.88,10530018,SBUX +2016-05-18,54.76,55.17,54.38,54.8,7468496,SBUX +2016-05-19,54.43,54.615,54.19,54.55,7582848,SBUX +2016-05-20,54.88,55.3736,54.58,54.62,8430783,SBUX +2016-05-23,54.62,54.8171,54.291000000000004,54.6,7352054,SBUX +2016-05-24,54.74,55.62,54.68,55.44,7748697,SBUX +2016-05-25,55.2,55.46,54.95,55.15,8126058,SBUX +2016-05-26,55.55,55.8699,54.95,55.29,9451708,SBUX +2016-05-27,55.36,55.55,55.1,55.15,6631120,SBUX +2016-05-31,55.5,55.5,54.7,54.89,12043976,SBUX +2016-06-01,54.76,55.49,54.72,54.82,8761577,SBUX +2016-06-02,54.9,55.0,54.455,54.62,8307488,SBUX +2016-06-03,54.71,55.08,54.4,54.61,6649224,SBUX +2016-06-06,54.72,55.87,54.69,55.59,9900917,SBUX +2016-06-07,55.65,56.1,55.28,55.3,7173072,SBUX +2016-06-08,55.5,55.5,54.9,55.22,7623851,SBUX +2016-06-09,55.15,55.61,55.06,55.58,5927854,SBUX +2016-06-10,54.92,55.2,54.5,54.865,8118651,SBUX +2016-06-13,54.79,55.6,54.76,55.04,7928722,SBUX +2016-06-14,55.05,55.58,55.0101,55.57,8036517,SBUX +2016-06-15,55.64,56.09,55.27,55.35,7447317,SBUX +2016-06-16,54.9,55.59,54.41,55.53,7968033,SBUX +2016-06-17,55.61,55.62,55.04,55.31,9503017,SBUX +2016-06-20,55.77,56.28,55.38,55.38,7286681,SBUX +2016-06-21,55.52,56.03,55.45,55.81,7445109,SBUX +2016-06-22,55.88,55.98,55.49,55.61,7215411,SBUX +2016-06-23,55.98,56.195,55.9,56.13,5569431,SBUX +2016-06-24,54.05,55.57,54.01,54.68,14654672,SBUX +2016-06-27,54.2,54.48,53.41,53.69,11650798,SBUX +2016-06-28,54.1,54.9,53.95,54.85,8416950,SBUX +2016-06-29,55.42,56.945,55.36,56.74,11103787,SBUX +2016-06-30,56.81,57.19,56.516000000000005,57.12,10215193,SBUX +2016-07-01,57.04,57.36,56.845,56.99,8330308,SBUX +2016-07-05,56.81,56.96,56.55,56.77,7274208,SBUX 
+2016-07-06,56.52,57.11,56.32,56.75,8474188,SBUX +2016-07-07,56.66,57.0,56.47,56.91,6813347,SBUX +2016-07-08,56.92,57.0,56.36,56.51,12655500,SBUX +2016-07-11,56.8,56.92,56.06,56.32,10394366,SBUX +2016-07-12,56.65,57.6,56.505,57.48,10998486,SBUX +2016-07-13,56.8,57.26,56.35,56.48,12183638,SBUX +2016-07-14,57.0,57.68,56.97,57.59,11353599,SBUX +2016-07-15,57.69,57.74,57.125,57.41,8494000,SBUX +2016-07-18,57.59,57.59,56.85,56.92,7614635,SBUX +2016-07-19,56.85,57.135,56.545,56.76,8719772,SBUX +2016-07-20,57.0,57.66,56.705,57.54,9446396,SBUX +2016-07-21,57.62,57.67,57.0375,57.6,16151699,SBUX +2016-07-22,57.6,58.24,57.2,57.9,23899275,SBUX +2016-07-25,57.72,58.09,57.5,57.95,10486019,SBUX +2016-07-26,58.58,58.84,58.2,58.31,10106663,SBUX +2016-07-27,58.38,58.4,57.67,57.85,6551777,SBUX +2016-07-28,57.88,58.31,57.74,58.21,6830059,SBUX +2016-07-29,58.18,58.43,57.92,58.05,6914907,SBUX +2016-08-01,58.0,58.05,57.43,57.63,7997576,SBUX +2016-08-02,57.25,57.34,56.54,56.73,7574622,SBUX +2016-08-03,56.46,56.59,55.72,55.94,11484792,SBUX +2016-08-04,56.05,56.29,55.38,55.42,11193748,SBUX +2016-08-05,55.8,56.12,55.52,55.9,9206197,SBUX +2016-08-08,55.97,55.99,55.17,55.36,9129307,SBUX +2016-08-09,55.39,55.71,55.18,55.2,7136683,SBUX +2016-08-10,55.37,55.71,55.11,55.62,6991077,SBUX +2016-08-11,55.75,55.96,55.46,55.47,6191412,SBUX +2016-08-12,55.27,55.745,55.23,55.47,5039757,SBUX +2016-08-15,55.65,55.7,55.18,55.25,5968728,SBUX +2016-08-16,55.25,55.57,54.92,55.37,5751178,SBUX +2016-08-17,55.77,55.92,55.43,55.8,7410631,SBUX +2016-08-18,55.78,55.9,55.49,55.53,5390990,SBUX +2016-08-19,55.46,55.56,54.85,54.94,8981214,SBUX +2016-08-22,54.98,55.92,54.95,55.85,8837808,SBUX +2016-08-23,56.17,56.54,56.0,56.4,7827873,SBUX +2016-08-24,57.0,57.98,56.95,57.09,13200460,SBUX +2016-08-25,57.04,57.45,56.9,57.29,6686589,SBUX +2016-08-26,57.48,57.83,56.995,57.29,6940511,SBUX +2016-08-29,57.22,57.48,56.61,56.8,7026700,SBUX +2016-08-30,56.66,56.75,56.01,56.4,6377668,SBUX +2016-08-31,56.31,56.42,55.905,56.23,6996894,SBUX +2016-09-01,56.3,56.56,55.83,56.31,6230148,SBUX +2016-09-02,56.52,56.65,55.985,56.18,7441463,SBUX +2016-09-06,56.18,56.42,55.69,56.02,6472907,SBUX +2016-09-07,56.19,56.6,56.12,56.32,11428644,SBUX +2016-09-08,56.1,56.15,55.2,55.3,12673626,SBUX +2016-09-09,55.14,55.2,54.3,54.35,10658120,SBUX +2016-09-12,53.92,54.79,53.92,54.71,11002544,SBUX +2016-09-13,54.39,54.55,53.75,53.98,10050215,SBUX +2016-09-14,54.26,54.35,53.8,53.9,6707850,SBUX +2016-09-15,53.96,54.13,53.54,54.11,8080426,SBUX +2016-09-16,53.94,54.09,53.41,53.74,10207750,SBUX +2016-09-19,53.96,53.9739,52.9,53.01,9231628,SBUX +2016-09-20,53.4,53.435,53.05,53.3,8731499,SBUX +2016-09-21,53.43,54.03,53.28,53.98,9213637,SBUX +2016-09-22,54.2,54.6,53.965,54.39,9096709,SBUX +2016-09-23,54.0,54.56,54.0,54.43,6945147,SBUX +2016-09-26,54.28,54.34,53.86,54.04,7755629,SBUX +2016-09-27,54.0,54.21,53.82,54.19,6463747,SBUX +2016-09-28,53.88,54.08,53.53,53.98,8593496,SBUX +2016-09-29,53.88,53.995,52.91,53.45,11993517,SBUX +2016-09-30,53.65,54.385,53.56,54.14,13767754,SBUX +2016-10-03,54.1,54.15,53.665,53.84,5479037,SBUX +2016-10-04,54.13,54.13,53.41,53.53,6176358,SBUX +2016-10-05,53.5,53.76,53.275,53.35,7451916,SBUX +2016-10-06,53.3,53.48,53.03,53.14,6130270,SBUX +2016-10-07,53.37,53.605,53.0,53.46,7279723,SBUX +2016-10-10,53.53,53.6,53.27,53.3,7224335,SBUX +2016-10-11,53.13,53.4,52.74,52.92,9720407,SBUX +2016-10-12,53.01,53.47,52.78,53.16,6320462,SBUX +2016-10-13,52.88,53.13,52.6662,52.95,6958128,SBUX +2016-10-14,53.12,53.37,52.96,53.08,6430136,SBUX 
+2016-10-17,52.94,53.145,52.69,52.76,5223526,SBUX +2016-10-18,53.24,53.3197,52.59,52.61,6550399,SBUX +2016-10-19,52.91,53.74,52.9,53.15,9095261,SBUX +2016-10-20,53.36,53.74,52.91,53.59,9286800,SBUX +2016-10-21,53.42,53.7,53.25,53.63,6767204,SBUX +2016-10-24,53.9,54.46,53.8939,54.18,6919714,SBUX +2016-10-25,54.1,54.17,53.5,53.67,6052830,SBUX +2016-10-26,53.6,53.84,53.355,53.63,5817798,SBUX +2016-10-27,53.6,53.83,53.13,53.59,7899957,SBUX +2016-10-28,53.65,53.84,53.11,53.53,6620333,SBUX +2016-10-31,53.7,53.7,53.055,53.07,9142509,SBUX +2016-11-01,53.14,53.21,52.085,52.5,15425819,SBUX +2016-11-02,52.34,53.46,52.31,52.98,10851658,SBUX +2016-11-03,52.99,53.0,51.34,51.77,21847292,SBUX +2016-11-04,51.43,53.74,50.84,52.75,21956848,SBUX +2016-11-07,53.5,54.68,53.19,54.49,14916848,SBUX +2016-11-08,54.4,54.79,54.115,54.62,9351994,SBUX +2016-11-09,53.2,54.82,52.8,54.58,13727777,SBUX +2016-11-10,54.64,54.817,53.51,53.57,13621701,SBUX +2016-11-11,53.43,53.99,53.25,53.93,8436435,SBUX +2016-11-14,53.93,54.47,53.5,54.22,10489826,SBUX +2016-11-15,54.09,54.69,53.9,54.59,9588036,SBUX +2016-11-16,54.33,55.52,54.26,55.44,10779155,SBUX +2016-11-17,55.215,55.9,55.06,55.85,8744504,SBUX +2016-11-18,55.72,56.12,55.42,55.77,8740953,SBUX +2016-11-21,55.51,56.16,55.51,56.1,8004000,SBUX +2016-11-22,56.32,57.15,55.88,57.12,10268720,SBUX +2016-11-23,56.91,57.64,56.9,57.59,8183628,SBUX +2016-11-25,57.7,57.7,57.255,57.43,3228848,SBUX +2016-11-28,57.0,57.86,56.76,57.59,8750925,SBUX +2016-11-29,57.64,58.21,57.5,58.17,10582850,SBUX +2016-11-30,58.19,58.25,57.86,57.97,9527959,SBUX +2016-12-01,57.34,58.52,57.2,58.51,12381607,SBUX +2016-12-02,56.648999999999994,57.75,56.57,57.21,16869957,SBUX +2016-12-05,56.96,57.84,56.96,57.5,7701167,SBUX +2016-12-06,57.66,57.7,57.14,57.44,7035674,SBUX +2016-12-07,57.54,58.85,57.45,58.76,9094812,SBUX +2016-12-08,59.0,59.25,58.4118,58.65,7972498,SBUX +2016-12-09,58.92,58.95,58.43,58.75,7091577,SBUX +2016-12-12,58.54,58.79,58.34,58.77,7736198,SBUX +2016-12-13,58.99,59.54,58.66,59.31,8878080,SBUX +2016-12-14,59.03,59.25,58.6,58.75,9022867,SBUX +2016-12-15,57.9675,58.21,57.52,57.71,11837756,SBUX +2016-12-16,58.01,58.07,57.56,57.66,10611461,SBUX +2016-12-19,57.44,57.98,57.44,57.65,6433824,SBUX +2016-12-20,57.81,58.06,57.32,57.7,4888284,SBUX +2016-12-21,57.5,57.87,57.41,57.44,5380537,SBUX +2016-12-22,57.31,57.4,56.72,57.11,6777656,SBUX +2016-12-23,57.29,57.36,56.89,57.01,4298476,SBUX +2016-12-27,56.99,57.3869,56.81,56.86,4186157,SBUX +2016-12-28,56.8,56.9,56.25,56.35,5548726,SBUX +2016-12-29,56.35,56.47,56.135,56.32,3781721,SBUX +2016-12-30,56.28,56.45,55.4,55.52,8344508,SBUX +2017-01-03,55.91,55.95,55.04,55.35,7809307,SBUX +2017-01-04,55.56,56.195,55.38,55.99,7796290,SBUX +2017-01-05,56.08,56.53,55.8099,56.46,7602321,SBUX +2017-01-06,56.63,57.27,56.08,57.13,8587812,SBUX +2017-01-09,57.26,58.335,57.25,58.2,12640515,SBUX +2017-01-10,58.22,58.26,57.83,57.88,6672024,SBUX +2017-01-11,57.8,58.12,57.59,58.1,6027960,SBUX +2017-01-12,58.0,58.13,57.64,58.03,4733015,SBUX +2017-01-13,58.03,58.1,57.65,57.85,4745840,SBUX +2017-01-17,57.62,58.25,57.41,58.0,5734666,SBUX +2017-01-18,58.32,58.58,58.03,58.45,7375725,SBUX +2017-01-19,58.31,58.45,57.715,57.89,7850480,SBUX +2017-01-20,58.14,58.2,57.41,57.66,7651562,SBUX +2017-01-23,57.42,57.9,57.15,57.76,6814368,SBUX +2017-01-24,57.93,58.5,57.76,58.44,10704103,SBUX +2017-01-25,58.67,58.93,58.45,58.7,7124547,SBUX +2017-01-26,58.7,59.0,58.26,58.46,12382416,SBUX +2017-01-27,55.75,56.59,55.65,56.12,28884899,SBUX +2017-01-30,56.0,56.24,55.58,55.9,13322010,SBUX 
+2017-01-31,55.8,55.87,54.88,55.22,14307985,SBUX +2017-02-01,55.49,55.5,53.81,53.9,18796871,SBUX +2017-02-02,54.04,54.39,53.85,53.87,15289650,SBUX +2017-02-03,54.21,55.1,54.01,55.06,14161693,SBUX +2017-02-06,55.01,55.75,54.9,55.73,13029829,SBUX +2017-02-07,55.79,55.84,55.1801,55.24,9910498,SBUX +2017-02-08,55.19,55.4957,55.1,55.22,11681938,SBUX +2017-02-09,55.23,56.12,55.21,55.81,11106757,SBUX +2017-02-10,55.73,56.395,55.56,56.22,11178950,SBUX +2017-02-13,56.5,56.66,56.03,56.11,8027939,SBUX +2017-02-14,56.02,56.61,56.02,56.58,8865947,SBUX +2017-02-15,56.56,56.88,56.305,56.86,6967179,SBUX +2017-02-16,56.96,56.99,56.53,56.73,8524519,SBUX +2017-02-17,56.8,57.57,56.71,57.35,11008366,SBUX +2017-02-21,57.41,57.81,57.4,57.54,8289185,SBUX +2017-02-22,57.52,57.85,57.35,57.57,7876599,SBUX +2017-02-23,57.6,57.79,57.39,57.64,7178627,SBUX +2017-02-24,57.61,57.71,57.145,57.48,7806190,SBUX +2017-02-27,57.24,57.3,56.66,56.78,7702400,SBUX +2017-02-28,56.71,57.06,56.55,56.87,8750655,SBUX +2017-03-01,57.27,57.4,56.94,57.14,7197973,SBUX +2017-03-02,57.07,57.19,56.85,57.12,6595418,SBUX +2017-03-03,56.7,57.26,56.7,57.1,7738064,SBUX +2017-03-06,56.78,56.81,56.33,56.68,9159983,SBUX +2017-03-07,56.58,56.75,56.02,56.2,10890313,SBUX +2017-03-08,56.15,56.35,55.54,55.74,13061632,SBUX +2017-03-09,55.75,55.8,54.81,55.19,17844248,SBUX +2017-03-10,55.39,55.4,54.415,54.53,13886431,SBUX +2017-03-13,54.57,54.8847,54.4,54.63,9090230,SBUX +2017-03-14,54.62,54.74,54.19,54.27,7892888,SBUX +2017-03-15,54.39,54.65,54.09,54.54,8712006,SBUX +2017-03-16,54.85,54.985,54.66,54.8,8074278,SBUX +2017-03-17,55.04,56.13,54.95,55.78,15822141,SBUX +2017-03-20,55.87,56.05,55.51,55.81,7948425,SBUX +2017-03-21,56.05,56.45,55.485,55.54,8030642,SBUX +2017-03-22,55.68,56.04,55.51,55.89,8452070,SBUX +2017-03-23,56.15,56.425,55.78,55.85,7357207,SBUX +2017-03-24,56.11,57.38,55.9,56.81,15763000,SBUX +2017-03-27,56.66,57.45,56.46,57.23,8861799,SBUX +2017-03-28,57.1,57.52,57.025,57.35,8331644,SBUX +2017-03-29,57.17,57.85,57.13,57.54,7001635,SBUX +2017-03-30,57.45,58.3,57.42,58.16,8677916,SBUX +2017-03-31,58.105,58.66,58.06,58.39,9156707,SBUX +2017-04-03,58.28,58.47,57.89,58.44,8989831,SBUX +2017-04-04,58.37,58.41,58.01,58.32,6474781,SBUX +2017-04-05,57.96,59.27,57.81,58.22,13656221,SBUX +2017-04-06,58.13,58.36,57.73,57.92,9112533,SBUX +2017-04-07,57.8,58.25,57.55,58.02,6773750,SBUX +2017-04-10,58.0,58.21,57.73,57.95,5266312,SBUX +2017-04-11,57.74,58.16,57.38,57.88,5855091,SBUX +2017-04-12,57.88,57.895,57.48,57.58,5450298,SBUX +2017-04-13,57.61,57.79,57.45,57.51,4871717,SBUX +2017-04-17,57.68,58.25,57.68,58.08,5626042,SBUX +2017-04-18,57.59,58.48,57.59,58.35,5449699,SBUX +2017-04-19,58.6,59.49,58.5,59.04,12000639,SBUX +2017-04-20,59.71,60.335,59.56,60.08,12445862,SBUX +2017-04-21,60.2,60.69,60.15,60.61,8799985,SBUX +2017-04-24,61.0,61.38,60.89,61.11,10721276,SBUX +2017-04-25,60.65,61.21,59.9237,60.96,11031475,SBUX +2017-04-26,61.23,61.75,60.98,61.56,8525419,SBUX +2017-04-27,61.63,61.94,61.19,61.3,15285342,SBUX +2017-04-28,59.41,60.18,58.99,60.06,25046130,SBUX +2017-05-01,60.0,60.6,59.7619,60.18,10910341,SBUX +2017-05-02,60.15,60.52,60.04,60.5,9152088,SBUX +2017-05-03,60.52,60.665,60.255,60.59,7706367,SBUX +2017-05-04,60.65,60.85,60.39,60.83,6874286,SBUX +2017-05-05,60.86,60.99,60.58,60.95,6443309,SBUX +2017-05-08,61.07,61.07,60.7,60.94,5588371,SBUX +2017-05-09,60.58,61.08,60.57,60.98,5806562,SBUX +2017-05-10,60.88,60.98,60.21,60.66,7198414,SBUX +2017-05-11,60.45,60.51,60.03,60.27,5516893,SBUX +2017-05-12,60.28,60.34,59.83,59.93,5647531,SBUX 
+2017-05-15,60.42,60.49,60.125,60.45,5904094,SBUX +2017-05-16,60.68,60.7,59.88,59.98,6303480,SBUX +2017-05-17,59.97,60.3239,59.55,59.73,7581230,SBUX +2017-05-18,59.73,59.94,58.87,59.82,8602411,SBUX +2017-05-19,59.94,61.92,59.94,61.36,12530995,SBUX +2017-05-22,61.03,61.72,61.03,61.23,6392124,SBUX +2017-05-23,61.4,61.5,60.86,61.15,5622524,SBUX +2017-05-24,61.4,62.0,60.94,61.89,7283789,SBUX +2017-05-25,62.01,63.11,61.9,62.9,8931804,SBUX +2017-05-26,63.01,63.42,62.97,63.3,6097990,SBUX +2017-05-30,63.07,63.41,63.01,63.26,7094586,SBUX +2017-05-31,63.27,63.61,63.08,63.61,7314634,SBUX +2017-06-01,63.51,63.82,63.35,63.75,6058263,SBUX +2017-06-02,63.88,64.68,63.7,64.57,7840374,SBUX +2017-06-05,64.85,64.87,64.18,64.27,6809284,SBUX +2017-06-06,64.22,64.35,64.05,64.16,5448439,SBUX +2017-06-07,64.13,64.295,63.34,63.5,8364994,SBUX +2017-06-08,63.44,63.58,62.02,62.24,11289266,SBUX +2017-06-09,62.37,62.48,61.8745,62.19,11240487,SBUX +2017-06-12,61.8,61.99,60.63,61.29,11071593,SBUX +2017-06-13,61.12,61.255,60.59,60.92,9384906,SBUX +2017-06-14,60.67,60.82,59.86,60.27,9703332,SBUX +2017-06-15,59.92,60.28,59.51,60.09,7515980,SBUX +2017-06-16,59.89,60.16,59.47,60.14,11522438,SBUX +2017-06-19,60.35,61.0,60.11,60.9,6778024,SBUX +2017-06-20,60.98,61.0,59.7,59.86,6985666,SBUX +2017-06-21,60.0,60.31,59.71,59.96,6027647,SBUX +2017-06-22,60.09,60.1,59.4,59.51,5602002,SBUX +2017-06-23,59.76,60.17,59.58,59.81,6469495,SBUX +2017-06-26,60.02,60.15,59.33,59.64,5674637,SBUX +2017-06-27,59.54,59.69,58.81,58.96,5652429,SBUX +2017-06-28,59.06,59.25,58.8,59.18,5419169,SBUX +2017-06-29,59.17,59.18,57.955,58.36,7421177,SBUX +2017-06-30,58.68,58.95,58.29,58.31,8117066,SBUX +2017-07-03,58.9,58.99,58.25,58.25,4575268,SBUX +2017-07-05,58.43,58.5,57.8,57.94,7773566,SBUX +2017-07-06,57.8,57.92,57.4,57.6,8886648,SBUX +2017-07-07,57.79,58.36,57.54,58.04,7278250,SBUX +2017-07-10,58.18,58.35,57.75,57.81,4832094,SBUX +2017-07-11,57.91,58.08,57.53,57.9,5422330,SBUX +2017-07-12,58.21,58.71,58.02,58.54,7141916,SBUX +2017-07-13,58.8,58.87,58.12,58.38,8460245,SBUX +2017-07-14,58.4,58.92,58.28,58.76,5441377,SBUX +2017-07-17,58.73,58.87,58.28,58.33,6774471,SBUX +2017-07-18,58.12,58.58,57.69,58.21,7857464,SBUX +2017-07-19,58.15,58.41,57.9,58.11,8203557,SBUX +2017-07-20,58.41,58.84,58.0,58.03,10546701,SBUX +2017-07-21,57.92,58.26,57.83,57.98,6717235,SBUX +2017-07-24,58.0,58.25,57.93,58.02,7442589,SBUX +2017-07-25,58.215,58.84,57.98,58.55,7933137,SBUX +2017-07-26,58.8,58.84,57.7847,57.94,8775889,SBUX +2017-07-27,58.25,59.66,57.93,59.5,23286716,SBUX +2017-07-28,55.23,55.96,53.41,54.0,53454789,SBUX +2017-07-31,54.48,54.68,53.95,53.98,20299407,SBUX +2017-08-01,54.57,54.79,53.97,54.73,18120912,SBUX +2017-08-02,54.75,55.45,54.6702,55.43,14764854,SBUX +2017-08-03,55.64,56.12,55.5,55.68,13331459,SBUX +2017-08-04,55.97,56.05,55.09,55.44,9179779,SBUX +2017-08-07,55.6,55.93,55.42,55.63,7253947,SBUX +2017-08-08,55.55,55.58,54.36,54.52,11095259,SBUX +2017-08-09,54.43,54.43,53.3,53.74,16717719,SBUX +2017-08-10,53.52,53.7453,52.99,53.07,13235301,SBUX +2017-08-11,53.05,53.4,53.05,53.18,9235033,SBUX +2017-08-14,53.6,53.6,53.18,53.22,7426467,SBUX +2017-08-15,53.41,53.42,52.89,53.15,6674597,SBUX +2017-08-16,53.26,53.92,53.25,53.5,7667081,SBUX +2017-08-17,53.32,53.78,52.99,53.04,7451679,SBUX +2017-08-18,52.92,53.085,52.58,52.7,10370499,SBUX +2017-08-21,53.14,53.52,52.8,53.15,12753196,SBUX +2017-08-22,53.49,54.74,53.39,54.45,14547613,SBUX +2017-08-23,53.96,54.1,53.65,54.08,11838533,SBUX +2017-08-24,54.26,54.445,53.77,53.94,8716699,SBUX 
+2017-08-25,54.62,54.69,54.19,54.36,8601120,SBUX +2017-08-28,54.54,54.58,54.02,54.4,6643652,SBUX +2017-08-29,54.04,54.29,54.0,54.1,6212403,SBUX +2017-08-30,54.04,54.62,54.04,54.52,5596567,SBUX +2017-08-31,54.68,54.99,54.57,54.86,8245287,SBUX +2017-09-01,54.9,55.155,54.88,54.93,7696302,SBUX +2017-09-05,54.95,55.2,54.6,55.13,9178048,SBUX +2017-09-06,55.12,55.275,54.13,54.31,11372298,SBUX +2017-09-07,54.42,54.675,53.25,53.47,13801890,SBUX +2017-09-08,53.39,53.78,53.05,53.49,11774483,SBUX +2017-09-11,53.79,54.13,53.61,54.02,9314079,SBUX +2017-09-12,53.98,54.05,53.33,53.54,10339446,SBUX +2017-09-13,53.55,54.72,53.4866,54.29,12787883,SBUX +2017-09-14,54.38,54.71,54.1,54.53,8260770,SBUX +2017-09-15,54.44,54.79,54.16,54.67,10744783,SBUX +2017-09-18,54.8,54.97,54.57,54.69,6348360,SBUX +2017-09-19,54.84,54.88,54.56,54.62,5187121,SBUX +2017-09-20,54.58,55.18,54.58,55.15,7080035,SBUX +2017-09-21,55.05,55.17,54.86,55.01,7178462,SBUX +2017-09-22,55.05,55.2,54.86,55.09,6950029,SBUX +2017-09-25,54.98,55.22,54.7,54.95,10242655,SBUX +2017-09-26,55.06,55.22,54.795,55.13,8021851,SBUX +2017-09-27,55.25,55.6,54.96,54.99,8671309,SBUX +2017-09-28,54.81,55.08,54.45,54.5,7607473,SBUX +2017-09-29,54.31,54.47,53.36,53.71,11944370,SBUX +2017-10-02,53.86,54.04,53.75,53.81,5955980,SBUX +2017-10-03,54.0,54.34,53.9,53.99,6137069,SBUX +2017-10-04,54.12,54.2,53.69,53.93,5569058,SBUX +2017-10-05,54.06,54.97,53.92,54.6,7994274,SBUX +2017-10-06,54.51,55.45,54.245,55.17,10576278,SBUX +2017-10-09,55.37,55.49,54.96,55.02,5822893,SBUX +2017-10-10,55.1,55.8492,55.08,55.42,6573918,SBUX +2017-10-11,55.46,55.8,55.26,55.64,8881897,SBUX +2017-10-12,55.67,56.27,55.309,55.97,7234267,SBUX +2017-10-13,56.0,56.43,55.61,55.72,6231132,SBUX +2017-10-16,55.67,55.8,54.89,54.91,7256893,SBUX +2017-10-17,54.86,55.23,54.29,54.51,11227337,SBUX +2017-10-18,54.46,55.43,54.22,55.21,8299509,SBUX +2017-10-19,55.08,55.5423,54.9,55.4,5720179,SBUX +2017-10-20,55.31,55.4,54.175,54.57,11741092,SBUX +2017-10-23,54.77,54.935,54.18,54.27,10111071,SBUX +2017-10-24,54.37,54.56,54.02,54.28,7818490,SBUX +2017-10-25,54.24,54.39,53.66,54.16,8281219,SBUX +2017-10-26,54.5,55.75,54.5,54.91,12211440,SBUX +2017-10-27,54.83,55.12,54.68,54.88,9922611,SBUX +2017-10-30,54.79,55.23,54.4,55.17,6430949,SBUX +2017-10-31,55.16,55.305,54.7,54.84,8857829,SBUX +2017-11-01,55.1,55.59,54.94,55.13,7189764,SBUX +2017-11-02,55.15,55.39,54.77,54.87,16879022,SBUX +2017-11-03,54.16,56.94,54.05,56.03,28773774,SBUX +2017-11-06,55.99,56.69,55.63,56.57,10835659,SBUX +2017-11-07,56.33,57.29,56.33,57.22,11167447,SBUX +2017-11-08,57.03,58.01,57.0,57.91,13533654,SBUX +2017-11-09,57.2,57.66,56.785,57.36,14758907,SBUX +2017-11-10,57.36,57.39,56.54,57.04,7930318,SBUX +2017-11-13,56.81,57.14,56.55,56.64,7648192,SBUX +2017-11-14,56.47,57.0,56.41,56.93,7758215,SBUX +2017-11-15,56.82,57.06,56.51,56.7,8880505,SBUX +2017-11-16,56.92,57.42,56.75,57.24,8310611,SBUX +2017-11-17,57.24,57.43,56.84,56.93,6311519,SBUX +2017-11-20,56.67,57.05,56.58,56.81,6360086,SBUX +2017-11-21,56.96,57.56,56.88,57.26,6284259,SBUX +2017-11-22,57.07,57.18,56.795,57.14,7309073,SBUX +2017-11-24,57.19,57.19,56.71,56.8,3479177,SBUX +2017-11-27,56.83,56.9207,55.751000000000005,55.91,10580296,SBUX +2017-11-28,56.03,56.72,55.9101,56.66,9734431,SBUX +2017-11-29,56.71,57.68,56.7,57.51,10017914,SBUX +2017-11-30,57.64,58.1399,57.47,57.82,11509224,SBUX +2017-12-01,57.5,57.71,56.461000000000006,57.32,12756391,SBUX +2017-12-04,57.54,59.19,57.5,58.76,13302050,SBUX +2017-12-05,59.25,59.68,58.91,59.34,11295644,SBUX 
+2017-12-06,59.38,59.83,59.23,59.28,8458198,SBUX +2017-12-07,59.12,59.275,58.761,59.14,6033792,SBUX +2017-12-08,58.52,58.845,58.1,58.61,9950491,SBUX +2017-12-11,58.39,59.35,58.29,59.07,10286560,SBUX +2017-12-12,58.99,59.36,58.87,59.27,6042917,SBUX +2017-12-13,59.44,59.89,59.3,59.49,7726299,SBUX +2017-12-14,59.73,60.05,59.44,59.7,8946111,SBUX +2017-12-15,59.25,59.37,58.1574,58.29,22595018,SBUX +2017-12-18,58.44,58.786,57.89,58.03,8751620,SBUX +2017-12-19,58.13,58.575,57.93,58.01,7946435,SBUX +2017-12-20,58.22,58.29,57.69,57.73,7188717,SBUX +2017-12-21,57.94,58.145,57.49,57.58,5974474,SBUX +2017-12-22,57.57,57.91,57.12,57.3,7148723,SBUX +2017-12-26,57.27,57.5799,57.05,57.14,5546208,SBUX +2017-12-27,57.19,57.65,57.18,57.27,4812173,SBUX +2017-12-28,57.47,58.0,57.3,57.81,5044505,SBUX +2017-12-29,57.74,57.97,57.42,57.43,5365646,SBUX +2018-01-02,57.95,58.21,57.48,57.63,7215978,SBUX +2018-01-03,57.93,58.96,57.8,58.71,7478356,SBUX +2018-01-04,58.99,59.41,58.73,58.93,5775921,SBUX +2018-01-05,59.25,59.69,59.07,59.61,6047686,SBUX +2018-01-08,59.48,59.67,58.56,59.31,6335782,SBUX +2018-01-09,59.2,59.47,58.86,59.18,5233353,SBUX +2018-01-10,59.24,60.13,58.855,59.82,8656454,SBUX +2018-01-11,59.76,60.02,59.4541,60.0,5806282,SBUX +2018-01-12,60.4,60.51,59.65,60.4,6989075,SBUX +2018-01-16,60.33,61.1,60.3,60.56,8040748,SBUX +2018-01-17,61.0,61.33,60.52,60.66,8433771,SBUX +2018-01-18,61.43,61.44,60.735,61.09,9170903,SBUX +2018-01-19,61.21,61.46,60.95,61.26,8361853,SBUX +2018-01-22,61.04,61.47,60.77,61.41,11945783,SBUX +2018-01-23,61.32,61.91,61.14,61.69,10806783,SBUX +2018-01-24,61.51,61.94,60.2326,60.83,11911867,SBUX +2018-01-25,61.03,61.2,60.4,60.55,16225618,SBUX +2018-01-26,57.94,58.17,56.55,57.99,51851690,SBUX +2018-01-29,57.55,58.35,56.91,57.02,18899867,SBUX +2018-01-30,56.96,57.54,56.74,57.19,14341155,SBUX +2018-01-31,57.23,57.45,56.7,56.81,13118364,SBUX +2018-02-01,56.28,56.42,55.89,56.0,14690146,SBUX +2018-02-02,55.9,56.32,55.7,55.77,15358909,SBUX +2018-02-05,55.53,56.26,54.57,54.69,16059955,SBUX +2018-02-06,53.685,56.06,53.56,55.61,17415065,SBUX +2018-02-07,55.08,55.43,54.44,54.46,13927022,SBUX From 42358e958aff977b06bbe09d058d3a9fe0ac6294 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 3 Aug 2019 14:36:13 -0400 Subject: [PATCH 151/329] update --- rl2/cartpole/pg_tf.py | 4 +-- rl2/cartpole/pg_theano.py | 56 +++++++++++++++++++-------------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/rl2/cartpole/pg_tf.py b/rl2/cartpole/pg_tf.py index 5c271494..d5021eb7 100644 --- a/rl2/cartpole/pg_tf.py +++ b/rl2/cartpole/pg_tf.py @@ -169,8 +169,8 @@ def play_one_td(env, pmodel, vmodel, gamma): # reward = -200 # update the models - V_next = vmodel.predict(observation) - G = reward + gamma*np.max(V_next) + V_next = vmodel.predict(observation)[0] + G = reward + gamma*V_next advantage = G - vmodel.predict(prev_observation) pmodel.partial_fit(prev_observation, action, advantage) vmodel.partial_fit(prev_observation, G) diff --git a/rl2/cartpole/pg_theano.py b/rl2/cartpole/pg_theano.py index 3164dd37..99ac7aec 100644 --- a/rl2/cartpole/pg_theano.py +++ b/rl2/cartpole/pg_theano.py @@ -169,34 +169,34 @@ def predict(self, X): return self.predict_op(X) -# def play_one_td(env, pmodel, vmodel, gamma): -# observation = env.reset() -# done = False -# totalreward = 0 -# iters = 0 - -# while not done and iters < 2000: -# # if we reach 2000, just quit, don't want this going forever -# # the 200 limit seems a bit early -# action = pmodel.sample_action(observation) -# prev_observation = observation -# 
observation, reward, done, info = env.step(action) - -# if done: -# reward = -200 - -# # update the models -# V_next = vmodel.predict(observation) -# G = reward + gamma*np.max(V_next) -# advantage = G - vmodel.predict(prev_observation) -# pmodel.partial_fit(prev_observation, action, advantage) -# vmodel.partial_fit(prev_observation, G) - -# if reward == 1: # if we changed the reward to -200 -# totalreward += reward -# iters += 1 - -# return totalreward +def play_one_td(env, pmodel, vmodel, gamma): + observation = env.reset() + done = False + totalreward = 0 + iters = 0 + + while not done and iters < 2000: + # if we reach 2000, just quit, don't want this going forever + # the 200 limit seems a bit early + action = pmodel.sample_action(observation) + prev_observation = observation + observation, reward, done, info = env.step(action) + + if done: + reward = -200 + + # update the models + V_next = vmodel.predict(observation) + G = reward + gamma*np.max(V_next) + advantage = G - vmodel.predict(prev_observation) + pmodel.partial_fit(prev_observation, action, advantage) + vmodel.partial_fit(prev_observation, G) + + if reward == 1: # if we changed the reward to -200 + totalreward += reward + iters += 1 + + return totalreward def play_one_mc(env, pmodel, vmodel, gamma): From aff046a442666acb524126fa8d627c33f30ea696 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 9 Aug 2019 00:23:05 -0400 Subject: [PATCH 152/329] update --- nlp_class/spam2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nlp_class/spam2.py b/nlp_class/spam2.py index bba2a72b..c8ae5414 100644 --- a/nlp_class/spam2.py +++ b/nlp_class/spam2.py @@ -54,7 +54,7 @@ model.fit(Xtrain, Ytrain) print("train score:", model.score(Xtrain, Ytrain)) print("test score:", model.score(Xtest, Ytest)) -exit() +# exit() # visualize the data @@ -73,6 +73,7 @@ def visualize(label): # see what we're getting wrong +X = tfidf.transform(df['data']) df['predictions'] = model.predict(X) # things that should be spam From 731865822509d544a8a7fbc43492bd60977d7816 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 14 Aug 2019 23:57:08 -0400 Subject: [PATCH 153/329] update --- tf2.0/extra_reading.txt | 3 +++ tf2.0/xor3d.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 tf2.0/xor3d.py diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt index 7d6d1ba3..7252c4e0 100644 --- a/tf2.0/extra_reading.txt +++ b/tf2.0/extra_reading.txt @@ -1,3 +1,6 @@ +Gradient Descent: Convergence Analysis +http://www.stat.cmu.edu/~ryantibs/convexopt-F13/scribes/lec6.pdf + Deep learning improved by biological activation functions https://arxiv.org/pdf/1804.11237.pdf diff --git a/tf2.0/xor3d.py b/tf2.0/xor3d.py new file mode 100644 index 00000000..4db10096 --- /dev/null +++ b/tf2.0/xor3d.py @@ -0,0 +1,33 @@ +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + + + +def get_label(x, i1, i2, i3): + # x = sequence + if x[i1] < 0 and x[i2] < 0 and x[i3] < 0: + return 1 + if x[i1] < 0 and x[i2] > 0 and x[i3] > 0: + return 1 + if x[i1] > 0 and x[i2] < 0 and x[i3] > 0: + return 1 + if x[i1] > 0 and x[i2] > 0 and x[i3] < 0: + return 1 + return 0 + + +N = 2000 +X = np.random.random((N, 3))*2 - 1 + +Y = np.zeros(N) +for i in range(N): + x = X[i] + y = get_label(x, 0, 1, 2) + Y[i] = y + + +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], X[:,2], c=Y) +plt.show() \ No newline at end of file From de016bbea4d34c106f6416b107b2d44b868cf171 Mon Sep 17 
00:00:00 2001 From: Bob Date: Sat, 24 Aug 2019 00:45:10 -0400 Subject: [PATCH 154/329] update --- cnn_class2/siamese.py | 443 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 443 insertions(+) create mode 100644 cnn_class2/siamese.py diff --git a/cnn_class2/siamese.py b/cnn_class2/siamese.py new file mode 100644 index 00000000..a8e5894f --- /dev/null +++ b/cnn_class2/siamese.py @@ -0,0 +1,443 @@ +# https://deeplearningcourses.com/c/advanced-computer-vision +from __future__ import print_function, division +from builtins import range, input +# Note: you may need to update your version of future +# sudo pip install -U future + +from keras.layers import Input, Lambda, Dense, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D +from keras.models import Model +from keras.preprocessing import image + +import keras.backend as K + +import numpy as np +import matplotlib.pyplot as plt + +from glob import glob +from collections import Counter + + +# get the data from: http://vision.ucsd.edu/content/yale-face-database +files = glob('../large_files/yalefaces/subject*') + +# easier to randomize later +np.random.shuffle(files) + +# number of samples +N = len(files) + + +def load_img(filepath): + # load image and downsample + img = image.img_to_array(image.load_img(filepath, target_size=[60, 80])).astype('uint8') + return img + + + +# look at an image for fun +img = load_img(np.random.choice(files)) +plt.imshow(img) +plt.show() + + +# try load images as arrays +# yes, I cheated and checked beforehand that all the images were the same shape! +shape = [N] + list(img.shape) +images = np.zeros(shape) +for i, f in enumerate(files): + # img = image.img_to_array(image.load_img(f)).astype('uint8') + img = load_img(f) + images[i] = img + + +# make the labels +# all the filenames are something like 'subject13.happy' +labels = np.zeros(N) +for i, f in enumerate(files): + filename = f.rsplit('/', 1)[-1] + subject_num = filename.split('.', 1)[0] + + # subtract 1 since the filenames start from 1 + idx = int(subject_num.replace('subject', '')) - 1 + labels[i] = idx + + +# how many of each subject do we have? 
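+# a quick sanity note on the labels built above: a filename like
+# 'subject13.happy' parses to subject_num 'subject13' and idx 12, so the
+# labels run from 0 to n_subjects - 1.
+# Counter(labels) maps each subject index to its image count; for the Yale
+# face set this is expected to be roughly 11 images per subject (an
+# assumption about the downloaded data, not something this script verifies),
+# which is what makes the 3-test-images-per-subject split below workable.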
+label_count = Counter(labels) + +# set of unique labels +unique_labels = set(label_count.keys()) + +# get the number of subjects +n_subjects = len(label_count) + +# let's make it so 3 images for each subject are test data +# number of test points is then +n_test = 3 * n_subjects +n_train = N - n_test + + +# initialize arrays to hold train and test images +train_images = np.zeros([n_train] + list(img.shape)) +train_labels = np.zeros(n_train) +test_images = np.zeros([n_test] + list(img.shape)) +test_labels = np.zeros(n_test) + + +count_so_far = {} +train_idx = 0 +test_idx = 0 +for img, label in zip(images, labels): + # increment the count + count_so_far[label] = count_so_far.get(label, 0) + 1 + + if count_so_far[label] > 3: + # we have already added 3 test images for this subject + # so add the rest to train + train_images[train_idx] = img + train_labels[train_idx] = label + train_idx += 1 + + else: + # add the first 3 images to test + test_images[test_idx] = img + test_labels[test_idx] = label + test_idx += 1 + + +# create label2idx mapping for easy access +train_label2idx = {} +test_label2idx = {} + +for i, label in enumerate(train_labels): + if label not in train_label2idx: + train_label2idx[label] = [i] + else: + train_label2idx[label].append(i) + +for i, label in enumerate(test_labels): + if label not in test_label2idx: + test_label2idx[label] = [i] + else: + test_label2idx[label].append(i) + + +# come up with all possible training sample indices +train_positives = [] +train_negatives = [] +test_positives = [] +test_negatives = [] + +for label, indices in train_label2idx.items(): + # all indices that do NOT belong to this subject + other_indices = set(range(n_train)) - set(indices) + + for i, idx1 in enumerate(indices): + for idx2 in indices[i+1:]: + train_positives.append((idx1, idx2)) + + for idx2 in other_indices: + train_negatives.append((idx1, idx2)) + +for label, indices in test_label2idx.items(): + # all indices that do NOT belong to this subject + other_indices = set(range(n_test)) - set(indices) + + for i, idx1 in enumerate(indices): + for idx2 in indices[i+1:]: + test_positives.append((idx1, idx2)) + + for idx2 in other_indices: + test_negatives.append((idx1, idx2)) + + +batch_size = 64 +def train_generator(): + # for each batch, we will send 1 pair of each subject + # and the same number of non-matching pairs + n_batches = int(np.ceil(len(train_positives) / batch_size)) + + while True: + np.random.shuffle(train_positives) + + n_samples = batch_size * 2 + shape = [n_samples] + list(img.shape) + x_batch_1 = np.zeros(shape) + x_batch_2 = np.zeros(shape) + y_batch = np.zeros(n_samples) + + for i in range(n_batches): + pos_batch_indices = train_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + y_batch[j] = 1 # match + j += 1 + + # get negative samples + neg_indices = np.random.choice(len(train_negatives), size=len(pos_batch_indices), replace=False) + for neg in neg_indices: + idx1, idx2 = train_negatives[neg] + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + y_batch[j] = 0 # non-match + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + y = y_batch[:j] + yield [x1, x2], y + + +# same thing as the train generator except no shuffling and it uses the test set +def test_generator(): + n_batches = int(np.ceil(len(test_positives) / batch_size)) + + while True: + n_samples = batch_size * 2 + shape = [n_samples] 
+ list(img.shape) + x_batch_1 = np.zeros(shape) + x_batch_2 = np.zeros(shape) + y_batch = np.zeros(n_samples) + + for i in range(n_batches): + pos_batch_indices = test_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + y_batch[j] = 1 # match + j += 1 + + # get negative samples + neg_indices = np.random.choice(len(test_negatives), size=len(pos_batch_indices), replace=False) + for neg in neg_indices: + idx1, idx2 = test_negatives[neg] + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + y_batch[j] = 0 # non-match + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + y = y_batch[:j] + yield [x1, x2], y + + + + +# build the base neural network +i = Input(shape=img.shape) +x = Conv2D(filters=32, kernel_size=(3, 3))(i) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Conv2D(filters=64, kernel_size=(3, 3))(x) +x = BatchNormalization()(x) +x = Activation('relu')(x) +x = MaxPooling2D()(x) + +x = Flatten()(x) +x = Dense(units=128, activation='relu')(x) +x = Dense(units=50)(x) # feature vector + +cnn = Model(inputs=i, outputs=x) + + +# feed both images into the same CNN +img_placeholder1 = Input(shape=img.shape) +img_placeholder2 = Input(shape=img.shape) + +# get image features +feat1 = cnn(img_placeholder1) +feat2 = cnn(img_placeholder2) + + +# calculate the Euclidean distance between feature 1 and feature 2 +def euclidean_distance(features): + x, y = features + return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True)) + + +# lambda layer to output distance between feat1 and feat2 +dist_layer = Lambda(euclidean_distance)([feat1, feat2]) + + +# the model we will actually train +model = Model(inputs=[img_placeholder1, img_placeholder2], outputs=dist_layer) + + +# loss function for siamese network +def contrastive_loss(y_true, y_pred): + margin = 1 + return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))) + + +# compile the model +model.compile( + loss=contrastive_loss, + optimizer='adam', +) + + +# calculate accuracy before training +# since the dataset is imbalanced, we'll report tp, tn, fp, fn +def get_train_accuracy(threshold=0.85): + positive_distances = [] + negative_distances = [] + + tp = 0 + tn = 0 + fp = 0 + fn = 0 + + batch_size = 64 + x_batch_1 = np.zeros([batch_size] + list(img.shape)) + x_batch_2 = np.zeros([batch_size] + list(img.shape)) + n_batches = int(np.ceil(len(train_positives) / batch_size)) + for i in range(n_batches): + print(f"pos batch: {i+1}/{n_batches}") + pos_batch_indices = train_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + positive_distances += distances.tolist() + + # update tp, tn, fp, fn + tp += (distances < threshold).sum() + fn += (distances > threshold).sum() + + n_batches = int(np.ceil(len(train_negatives) / batch_size)) + for i in range(n_batches): + print(f"neg batch: {i+1}/{n_batches}") + neg_batch_indices = train_negatives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in neg_batch_indices: + x_batch_1[j] = train_images[idx1] + x_batch_2[j] = train_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = 
x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + negative_distances += distances.tolist() + + # update tp, tn, fp, fn + fp += (distances < threshold).sum() + tn += (distances > threshold).sum() + + tpr = tp / (tp + fn) + tnr = tn / (tn + fp) + print(f"sensitivity (tpr): {tpr}, specificity (tnr): {tnr}") + + plt.hist(negative_distances, bins=20, density=True, label='negative_distances') + plt.hist(positive_distances, bins=20, density=True, label='positive_distances') + plt.legend() + plt.show() + + + +def get_test_accuracy(threshold=0.85): + positive_distances = [] + negative_distances = [] + + tp = 0 + tn = 0 + fp = 0 + fn = 0 + + batch_size = 64 + x_batch_1 = np.zeros([batch_size] + list(img.shape)) + x_batch_2 = np.zeros([batch_size] + list(img.shape)) + n_batches = int(np.ceil(len(test_positives) / batch_size)) + for i in range(n_batches): + print(f"pos batch: {i+1}/{n_batches}") + pos_batch_indices = test_positives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in pos_batch_indices: + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + positive_distances += distances.tolist() + + # update tp, tn, fp, fn + tp += (distances < threshold).sum() + fn += (distances > threshold).sum() + + n_batches = int(np.ceil(len(test_negatives) / batch_size)) + for i in range(n_batches): + print(f"neg batch: {i+1}/{n_batches}") + neg_batch_indices = test_negatives[i * batch_size: (i + 1) * batch_size] + + # fill up x_batch and y_batch + j = 0 + for idx1, idx2 in neg_batch_indices: + x_batch_1[j] = test_images[idx1] + x_batch_2[j] = test_images[idx2] + j += 1 + + x1 = x_batch_1[:j] + x2 = x_batch_2[:j] + distances = model.predict([x1, x2]).flatten() + negative_distances += distances.tolist() + + # update tp, tn, fp, fn + fp += (distances < threshold).sum() + tn += (distances > threshold).sum() + + + tpr = tp / (tp + fn) + tnr = tn / (tn + fp) + print(f"sensitivity (tpr): {tpr}, specificity (tnr): {tnr}") + + plt.hist(negative_distances, bins=20, density=True, label='negative_distances') + plt.hist(positive_distances, bins=20, density=True, label='positive_distances') + plt.legend() + plt.show() + + + + +# params for training +train_steps = int(np.ceil(len(train_positives) * 2 / batch_size)) +valid_steps = int(np.ceil(len(test_positives) * 2 / batch_size)) + +# fit the model +r = model.fit_generator( + train_generator(), + steps_per_epoch=train_steps, + epochs=20, + validation_data=test_generator(), + validation_steps=valid_steps, +) + +# plot the loss +plt.plot(r.history['loss'], label='train loss') +plt.plot(r.history['val_loss'], label='val loss') +plt.legend() +plt.show() + +get_train_accuracy() +get_test_accuracy() From f9e4b8ff660b465bd1dbe1f8524dc5dfde8fef04 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 25 Aug 2019 02:51:36 -0400 Subject: [PATCH 155/329] update --- recommenders/tfidf.py | 72 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 recommenders/tfidf.py diff --git a/recommenders/tfidf.py b/recommenders/tfidf.py new file mode 100644 index 00000000..a6078ec3 --- /dev/null +++ b/recommenders/tfidf.py @@ -0,0 +1,72 @@ +import pandas as pd +import json + +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances + + +# get the data from: https://www.kaggle.com/tmdb/tmdb-movie-metadata +# load 
in the data +df = pd.read_csv('../large_files/tmdb_5000_movies.csv') + + +# convert the relevant data for each movie into a single string +# to be ingested by TfidfVectorizer +def genres_and_keywords_to_string(row): + genres = json.loads(row['genres']) + genres = ' '.join(''.join(j['name'].split()) for j in genres) + + keywords = json.loads(row['keywords']) + keywords = ' '.join(''.join(j['name'].split()) for j in keywords) + return "%s %s" % (genres, keywords) + + +# create a new string representation of each movie +df['string'] = df.apply(genres_and_keywords_to_string, axis=1) + + +# create a tf-idf vectorizer object +# remove stopwords automatically +tfidf = TfidfVectorizer(max_features=2000) + +# create a data matrix from the overviews +X = tfidf.fit_transform(df['string']) + +# check the shape of X +print("X.shape:", X.shape) + +# generate a mapping from movie title -> index (in df) +movie2idx = pd.Series(df.index, index=df['title']) + +# create a function that generates recommendations +def recommend(title): + # get the row in the dataframe for this movie + idx = movie2idx[title] + if type(idx) == pd.Series: + idx = idx.iloc[0] + # print("idx:", idx) + + # calculate the pairwise similarities for this movie + query = X[idx] + scores = cosine_similarity(query, X) + + # currently the array is 1 x N, make it just a 1-D array + scores = scores.flatten() + + # get the indexes of the highest scoring movies + # get the first K recommendations + # don't return itself! + recommended_idx = (-scores).argsort()[1:6] + + # return the titles of the recommendations + return df['title'].iloc[recommended_idx] + + +print("\nRecommendations for 'Scream 3':") +print(recommend('Scream 3')) + +print("\nRecommendations for 'Mortal Kombat':") +print(recommend('Mortal Kombat')) + +print("\nRecommendations for 'Runaway Bride':") +print(recommend('Runaway Bride')) From ce38ea620d4c4d982bc3407ad752ebcb23443ec7 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 29 Aug 2019 03:08:47 -0400 Subject: [PATCH 156/329] update --- tf2.0/.gitignore | 3 + tf2.0/aapl_msi_sbux.csv | 1260 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 1263 insertions(+) create mode 100644 tf2.0/.gitignore create mode 100644 tf2.0/aapl_msi_sbux.csv diff --git a/tf2.0/.gitignore b/tf2.0/.gitignore new file mode 100644 index 00000000..8369960b --- /dev/null +++ b/tf2.0/.gitignore @@ -0,0 +1,3 @@ +rl_trader_working.py +rl_trader_models +rl_trader_rewards diff --git a/tf2.0/aapl_msi_sbux.csv b/tf2.0/aapl_msi_sbux.csv new file mode 100644 index 00000000..cb98cb88 --- /dev/null +++ b/tf2.0/aapl_msi_sbux.csv @@ -0,0 +1,1260 @@ +AAPL,MSI,SBUX +67.8542,60.3,28.185 +68.5614,60.9,28.07 +66.8428,60.83,28.13 +66.7156,60.81,27.915 +66.6556,61.12,27.775 +65.7371,61.43,27.17 +65.7128,62.03,27.225 +64.1214,61.26,26.655 +63.7228,60.88,26.675 +64.4014,61.9,27.085 +63.2571,60.28,26.605 +64.1385,60.63,26.64 +63.5099,62.09,27.285 +63.0571,62.21,27.425 +61.4957,62.03,27.435 +60.0071,62.5,27.85 +61.5919,62.97,28.255 +60.8088,63.11,28.55 +61.5117,62.64,29.125 +61.6742,62.75,29.335 +62.5528,62.56,29.305 +61.2042,62.13,29.14 +61.1928,62.22,29.2925 +61.7857,62.34,28.84 +63.3799,62.07,28.83 +65.1028,61.64,28.465 +64.9271,61.67,28.415 +64.5828,62.4,28.715 +64.6756,62.43,28.525 +65.9871,63.61,28.69 +66.2256,63.29,28.345 +65.8765,63.46,28.525 +64.5828,63.56,28.455 +63.2371,64.03,28.475 +61.2728,63.7,28.435 +61.3988,63.7,29.13 +61.7128,62.8,28.85 +61.1028,62.99,29.055 +60.4571,62.67,28.9 +60.8871,63.17,29.06 +60.9971,63.64,28.705 +62.2414,64.69,28.9 
+62.0471,64.63,29.2875 +61.3999,63.87,29.545 +59.9785,61.83,28.855 +60.8914,62.96,29.28 +57.5428,62.13,29.085 +56.0071,61.15,28.86 +55.7899,61.72,29.2025 +56.9528,61.78,29.32 +58.0185,61.75,29.695 +57.9231,56.02,29.915 +58.3399,56.39,30.25 +59.6007,56.8,30.0 +61.4457,57.44,30.29 +63.2542,57.2,30.42 +62.7557,56.37,30.07 +63.6457,56.89,30.19 +64.2828,57.29,30.935 +65.8156,56.95,31.24 +65.5225,56.79,31.095 +66.2628,57.0,31.205 +65.2528,56.78,31.18 +64.7099,56.48,31.5485 +64.9628,56.17,31.41 +63.4085,56.89,31.76 +61.2642,57.1,32.035 +62.0825,57.53,31.775 +61.8942,57.84,32.065 +63.2757,58.25,31.915 +62.8085,57.77,32.125 +63.0505,57.3,32.075 +63.1628,57.48,31.76 +63.5928,57.81,31.68 +63.0627,58.53,32.13 +63.5642,58.32,31.815 +64.5114,58.54,31.735 +64.2478,57.96,31.57 +64.3885,57.83,31.73 +64.1871,57.41,31.665 +63.5871,56.27,31.17 +62.6371,56.92,31.51 +63.1158,56.94,32.52 +62.6985,56.61,33.055 +62.5142,56.38,32.71 +61.7414,56.26,32.225 +62.2807,57.19,32.985 +61.4357,56.93,32.8 +61.7142,57.33,33.015 +61.6814,57.35,33.5475 +60.4285,56.78,33.205 +59.5482,55.5,32.61 +59.0714,55.82,32.345 +57.5057,55.59,32.005 +57.5185,56.35,32.37 +56.8671,57.49,32.9 +56.2542,57.84,32.845 +56.6471,57.73,32.755 +58.4599,57.98,33.12 +59.7842,57.49,33.395 +60.1142,57.26,33.65 +59.6314,57.93,33.86 +59.2928,57.86,34.145 +60.3357,58.03,34.065 +60.1042,58.43,34.05 +61.0411,59.05,34.67 +60.9299,59.54,34.86 +61.0628,59.17,34.83 +61.4564,59.32,34.76 +61.4728,59.42,34.1 +61.6797,59.36,34.24 +60.7071,59.85,34.395 +60.9014,59.87,34.51 +59.8557,59.98,33.83 +62.9299,56.04,33.305 +62.6428,54.25,34.085 +62.9985,54.26,36.68 +63.9699,54.01,36.225 +64.7599,54.35,35.965 +64.6471,54.83,35.6445 +65.2394,55.32,36.74 +66.0771,56.02,37.115 +67.0642,56.1,36.985 +66.4642,56.4,36.4 +66.4256,56.48,36.095 +65.8585,57.13,36.47 +64.9214,57.36,36.4 +66.7656,57.44,36.465 +69.9385,57.84,36.32 +71.2142,57.71,35.925 +71.1299,56.96,35.37 +71.7614,57.15,35.355 +72.5342,57.09,35.145 +71.5814,57.05,35.33 +71.7656,56.06,35.3565 +71.8514,56.33,35.95 +71.5742,56.74,35.985 +71.8528,56.55,35.94 +69.7985,56.12,35.08 +70.1279,56.39,35.48 +70.2428,56.19,35.59 +69.6022,56.01,35.26 +69.7971,56.28,35.8 +71.2415,56.08,36.07 +70.7528,56.17,36.025 +71.1742,56.47,35.785 +72.3099,57.59,36.22 +70.6628,57.37,37.1075 +66.8156,57.25,37.695 +67.5271,57.5,37.835 +66.4142,57.46,37.785 +64.3028,57.81,37.62 +65.0456,58.28,38.02 +66.3828,59.26,38.665 +67.4714,59.69,38.175 +66.7728,60.39,38.06 +70.0914,60.37,37.68 +69.8714,59.99,38.275 +68.7899,59.85,38.17 +69.4599,59.87,38.59 +68.9642,59.75,38.665 +68.1071,59.38,38.485 +69.7085,60.89,38.58 +69.9371,60.7,38.595 +69.0585,60.56,38.435 +69.0042,61.14,38.7 +69.6785,60.89,38.4305 +68.7056,59.62,37.765 +69.5125,59.39,37.63 +69.9482,60.61,38.56 +70.4016,60.52,38.91 +70.8628,61.03,39.05 +71.2399,60.49,38.355 +71.5876,60.71,39.02 +72.0714,60.92,39.3675 +72.6985,60.81,39.655 +74.4802,61.18,39.73 +74.2667,60.43,40.45 +74.9942,62.4,40.025 +75.9871,62.51,39.525 +75.1368,62.99,39.98 +75.6965,62.44,39.355 +73.8111,62.73,39.81 +74.9851,62.25,40.415 +74.6716,62.52,40.525 +74.2899,62.39,40.185 +75.2499,62.71,40.185 +75.0641,62.68,40.995 +74.4171,62.65,40.565 +73.2131,62.49,39.535 +74.3656,63.12,40.6 +74.1496,63.51,40.495 +74.2871,64.24,40.3075 +74.3762,64.45,40.7305 +75.4514,64.58,40.57 +74.9986,65.57,40.595 +74.0898,65.42,40.27 +74.2214,64.61,39.96 +73.5714,64.58,39.845 +74.4479,65.41,40.765 +74.2571,65.88,40.675 +74.8199,65.79,40.355 +76.1999,65.57,40.755 +77.9942,65.5,40.81 +79.4385,65.88,40.73 +78.7471,65.66,40.535 +80.9031,65.79,40.275 
+80.7142,64.93,39.75 +81.1286,65.23,39.86 +80.0028,66.18,39.97 +80.9185,65.79,39.865 +80.7928,65.41,38.69 +80.1942,64.6,38.2 +80.0771,64.86,38.24 +79.2042,65.05,38.175 +79.6428,65.36,38.23 +79.2842,65.52,38.045 +78.6813,66.16,38.84 +77.7799,65.85,38.575 +78.4314,65.61,38.83 +81.4413,66.78,39.16 +81.0956,67.1,39.285 +80.5571,67.18,39.44 +80.0128,67.33,39.285 +79.2171,67.25,39.275 +80.1456,67.5,39.195 +79.0185,66.33,38.585 +77.2828,66.2,38.475 +77.7042,65.92,38.085 +77.1481,66.19,38.605 +77.6371,65.99,39.015 +76.6455,66.5,38.8 +76.1342,66.15,38.835 +76.5328,65.49,37.56 +78.0556,66.35,37.73 +79.6228,65.62,38.095 +79.1785,65.81,37.645 +77.2385,66.1,37.45 +78.4385,67.11,36.825 +78.7871,64.51,36.8 +79.4542,65.34,36.695 +78.0099,64.42,37.49 +78.6428,64.43,37.105 +72.3571,64.34,36.945 +71.5356,63.98,35.78 +71.3974,64.91,35.955 +71.5142,63.8,35.56 +71.6471,62.72,34.485 +72.6842,62.99,35.325 +73.2271,62.89,35.245 +73.2156,63.4,36.18 +74.2399,64.6,37.0175 +75.5699,65.08,37.4 +76.5656,65.03,37.25 +76.5599,65.78,36.955 +77.7756,65.67,37.345 +77.7128,65.61,37.515 +77.9985,65.78,36.985 +76.7671,64.93,36.66 +75.8785,65.22,36.775 +75.0356,65.02,36.28 +75.3642,64.96,36.28 +74.5799,65.1,35.275 +73.9071,65.45,35.89 +75.3814,65.9,36.095 +75.1771,66.2,35.48 +75.3942,65.98,35.235 +75.8914,66.76,35.83 +76.0514,66.33,35.65 +75.8214,66.57,36.345 +75.7771,66.64,36.535 +75.8456,66.43,36.78 +76.5842,66.08,37.515 +76.6585,65.02,37.815 +75.8071,64.21,37.215 +74.9556,63.67,37.135 +75.2485,65.08,37.09 +75.9142,65.72,37.3 +75.8942,65.7,37.955 +75.5285,66.66,38.4775 +76.1242,66.81,38.355 +77.0271,66.05,37.885 +77.8556,66.18,37.305 +77.1114,65.16,36.77 +76.7799,64.36,36.7 +76.6942,64.3,36.85 +76.6771,64.29,36.69 +77.3785,64.91,37.005 +77.5071,65.1,36.835 +76.9699,65.09,36.545 +75.9742,64.26,35.775 +74.7814,64.43,35.215 +74.7771,64.95,35.74 +75.7599,65.26,36.24 +74.7828,63.99,35.11 +74.2299,63.39,34.365 +74.5256,63.78,34.655 +73.9942,63.37,34.445 +74.1442,63.23,35.395 +74.9914,63.15,35.075 +75.8814,62.51,35.24 +75.9569,63.27,35.5745 +74.9642,63.29,35.195 +81.1099,63.0,35.545 +81.7056,62.5,35.725 +84.8699,62.64,35.465 +84.6185,63.43,35.32 +84.2985,63.58,35.31 +84.4971,62.65,35.56 +84.6542,65.51,35.3 +85.8513,66.15,35.46 +84.9156,66.4,34.79 +84.6185,67.14,34.87 +83.9985,67.38,34.79 +83.6488,67.26,35.145 +84.6899,67.8,35.575 +84.8228,67.75,35.58 +84.8385,67.2,35.085 +84.1171,66.34,34.925 +85.3585,66.3,35.47 +86.3699,66.88,35.51 +86.3871,66.52,35.115 +86.6156,66.89,35.2 +86.7528,66.63,35.7 +87.7328,67.0,35.99 +89.3756,67.02,36.83 +89.1442,66.93,36.635 +90.7685,66.91,36.555 +90.4285,67.42,36.62 +89.8071,67.4,36.925 +91.0771,66.86,37.09 +92.1171,67.23,37.335 +92.4785,67.17,37.36 +92.2242,67.66,37.665 +93.7,67.67,37.59 +94.25,67.7,37.3 +93.86,66.93,37.4 +92.29,66.46,36.98 +91.28,66.78,37.345 +92.2,66.72,37.545 +92.08,66.64,37.655 +92.18,66.62,37.78 +91.86,67.06,38.615 +90.91,67.07,38.3 +90.83,67.1,38.365 +90.28,66.73,38.715 +90.36,66.55,39.06 +90.9,66.56,39.03 +91.98,66.78,38.97 +92.93,66.57,38.69 +93.52,66.96,39.04 +93.48,67.02,39.095 +94.03,67.41,39.53 +95.96799999999999,67.24,39.345 +95.35,66.27,39.28 +95.39,66.58,39.725 +95.035,66.45,39.425 +95.22,66.0,39.3 +96.45,66.08,39.28 +95.32,65.49,39.445 +94.78,65.67,39.365 +93.0899,64.94,38.62 +94.43,65.49,38.97 +93.939,65.74,38.805 +94.72,66.05,39.37 +97.19,65.77,39.57 +97.03,65.61,40.225 +97.671,65.0,39.37 +99.02,65.21,39.18 +98.38,64.74,39.325 +98.15,64.83,39.45 +95.6,63.68,38.84 +96.13,64.11,38.49 +95.59,64.11,38.765 +95.12,61.39,38.395 +94.96,61.21,38.565 +94.48,61.25,38.355 
+94.74,62.19,38.81 +95.99,61.73,38.935 +95.97,61.64,38.91 +97.24,62.03,38.62 +97.5,61.52,38.31 +97.98,61.0,38.455 +99.16,60.81,38.795 +100.53,61.37,39.06 +100.57,61.64,39.015 +100.58,61.7,38.735 +101.32,61.23,38.64 +101.54,61.02,38.985 +100.889,60.3,38.895 +102.13,59.68,38.96 +102.25,59.37,38.905 +102.5,59.4,38.905 +103.3,59.01,38.74 +98.94,58.94,38.395 +98.12,58.98,38.58 +98.97,58.89,38.975 +98.36,61.02,38.835 +97.99,61.08,38.56 +101.0,61.22,38.605 +101.43,61.22,38.06 +101.66,61.54,37.735 +101.63,61.42,37.46 +100.86,61.69,37.545 +101.58,61.91,37.67 +101.79,62.04,37.865 +100.96,61.88,38.035 +101.06,61.68,37.3 +102.64,61.57,36.9775 +101.75,61.8,37.66 +97.87,62.24,37.06 +100.75,63.42,37.585 +100.11,63.18,37.635 +100.75,63.28,37.73 +99.18,62.34,37.305 +99.9,61.03,37.225 +99.62,61.3,37.945 +99.62,61.5,37.5725 +98.75,60.47,37.025 +100.8,61.58,37.63 +101.02,60.46,37.24 +100.73,59.05,37.23 +99.81,58.5,36.095 +98.75,58.73,36.37 +97.54,59.32,36.19 +96.26,59.18,36.32 +97.67,60.79,36.77 +99.76,61.25,37.35 +102.47,62.39,37.18 +102.99,61.63,37.3 +104.83,62.25,37.42 +105.22,62.57,37.905 +105.11,62.8,37.985 +106.74,64.06,38.525 +107.34,63.94,38.27 +106.98,63.7,38.66 +108.0,64.5,37.78 +109.4,64.68,38.05 +108.6,66.76,38.355 +108.86,64.46,38.33 +108.7,63.42,38.725 +109.01,64.14,38.895 +108.83,63.94,38.825 +109.7,63.55,38.865 +111.25,63.7,38.925 +112.82,64.43,38.945 +114.18,65.25,39.06 +113.99,65.4,38.915 +115.47,66.0,38.785 +114.67,65.94,38.91 +116.31,65.66,39.1 +116.47,65.27,39.88 +118.625,65.81,40.26 +117.6,65.6,40.105 +119.0,65.56,39.85 +118.93,65.72,40.605 +115.07,65.44,40.425 +114.63,65.51,40.185 +115.93,65.32,40.235 +115.49,65.2,40.655 +115.0,65.0,41.785 +112.4,65.27,41.9 +114.12,65.29,41.515 +111.95,63.52,41.33 +111.62,63.29,41.56 +109.73,62.31,41.625 +108.225,61.91,40.445 +106.745,61.73,39.565 +109.41,63.99,40.2175 +112.65,65.11,40.015 +111.78,65.5,39.72 +112.94,66.53,40.27 +112.54,66.93,40.715 +112.01,67.34,40.635 +113.99,67.49,40.915 +113.91,67.87,41.19 +112.52,67.53,40.895 +110.38,67.08,41.025 +109.33,66.51,40.72 +106.25,65.06,39.94 +106.26,64.51,39.615 +107.75,64.43,40.59 +111.89,65.43,41.245 +112.01,65.11,39.895 +109.25,64.35,40.115 +110.22,64.11,40.435 +109.8,63.76,40.21 +106.82,63.41,39.79 +105.99,64.05,40.305 +108.72,64.02,40.6125 +109.55,64.31,40.645 +112.4,65.36,41.37 +112.98,65.48,44.11 +113.1,65.71,44.06 +109.14,64.94,44.17 +115.31,63.84,43.7825 +118.9,63.83,44.525 +117.16,62.41,43.765 +118.63,62.81,43.995 +118.65,64.01,44.245 +119.56,63.94,44.35 +119.94,64.0,44.82 +118.93,64.66,44.5 +119.72,67.78,44.41 +122.02,68.22,45.59 +124.88,68.57,45.395 +126.46,70.0,45.9125 +127.08,69.91,45.79 +127.83,69.79,46.015 +128.715,69.12,46.5 +128.45,69.03,46.585 +129.495,69.83,46.755 +133.0,68.63,46.79 +132.17,68.53,46.725 +128.79,68.02,47.13 +130.415,68.47,47.275 +128.46,67.94,46.7425 +129.09,68.89,47.1125 +129.36,68.14,47.0 +128.54,67.64,46.53 +126.41,67.93,46.815 +126.6,66.82,46.1075 +127.14,66.57,46.52 +124.51,65.33,46.09 +122.24,65.31,45.71 +124.45,64.96,46.69 +123.59,64.8,46.645 +124.95,65.86,47.0225 +127.04,65.32,47.1925 +128.47,66.65,47.92 +127.495,66.34,48.88 +125.9,66.83,48.73 +127.21,66.52,48.685 +126.69,66.23,48.9575 +123.38,65.35,47.885 +124.24,65.42,47.54 +123.25,65.38,47.535 +126.37,66.39,47.99 +124.43,66.67,47.35 +124.25,66.67,46.51 +125.32,62.51,47.195 +127.35,61.48,47.26 +126.01,61.99,47.035 +125.6,62.42,47.615 +126.56,62.32,47.96 +127.1,62.53,48.17 +126.85,61.97,48.5 +126.3,61.91,48.3 +126.78,61.82,48.14 +126.17,61.86,48.245 +124.75,60.68,47.62 +127.6,61.16,47.97 +126.91,61.43,48.37 
+128.62,61.59,48.335 +129.67,60.84,49.43 +130.28,60.57,51.84 +132.65,60.98,50.87 +130.56,60.69,50.61 +128.64,59.74,50.65 +125.15,59.75,49.58 +128.95,60.28,50.29 +128.7,60.68,50.445 +125.8,58.59,49.405 +125.01,58.75,48.93 +125.26,60.01,49.35 +127.62,60.59,49.78 +126.32,59.8,49.5 +125.865,59.42,49.71 +126.01,59.25,49.59 +128.95,59.79,50.555 +128.77,59.3,50.8 +130.19,60.12,51.18 +130.07,59.8,51.42 +130.06,59.8,51.03 +131.39,59.79,51.33 +132.54,59.66,51.48 +129.62,59.11,50.84 +132.045,59.06,51.59 +131.78,59.63,51.81 +130.28,59.0,51.96 +130.535,59.65,52.22 +129.96,59.19,51.73 +130.12,59.48,52.12 +129.36,58.8,51.72 +128.65,58.61,52.19 +127.8,58.08,51.53 +127.42,57.9,51.54 +128.88,58.49,52.69 +128.59,58.55,52.49 +127.17,57.65,52.63 +126.92,57.95,52.27 +127.6,58.18,52.965 +127.3,57.97,53.24 +127.88,58.39,54.11 +126.6,58.05,53.93 +127.61,59.22,53.9 +127.03,59.12,54.115 +128.11,58.29,53.71 +127.5,58.35,54.07 +126.75,58.38,54.62 +124.53,57.14,53.55 +125.425,57.34,53.615 +126.6,57.6,53.89 +126.44,57.51,54.24 +126.0,57.22,54.305 +125.69,57.49,54.375 +122.57,56.79,53.39 +120.07,56.94,54.05 +123.28,57.48,54.57 +125.66,58.43,55.7 +125.61,58.6,55.75 +126.82,58.89,55.34 +128.51,59.29,55.74 +129.62,58.85,55.69 +132.07,59.4,56.21 +130.75,59.57,56.2 +125.22,59.35,56.69 +125.16,58.85,56.56 +124.5,59.5,57.29 +122.77,58.71,56.98 +123.38,59.11,57.14 +122.99,59.58,57.51 +122.37,59.86,58.06 +121.3,60.16,57.93 +118.44,59.76,58.19 +114.64,60.22,58.7 +115.4,64.04,59.01 +115.13,63.8,57.23 +115.52,64.19,57.2 +119.72,63.99,56.27 +113.49,63.35,56.35 +115.24,64.6,56.38 +115.15,64.34,56.85 +115.96,64.98,57.1 +117.16,65.27,57.74 +116.5,65.77,57.83 +115.01,65.35,57.59 +112.65,63.89,55.81 +105.76,62.45,52.84 +103.12,60.79,50.34 +103.74,60.44,51.09 +109.69,63.14,53.96 +112.92,64.29,55.95 +113.29,64.55,55.63 +112.76,64.82,54.71 +107.72,63.85,53.5 +112.34,64.72,55.26 +110.37,65.11,54.69 +109.27,66.31,54.28 +112.31,69.61,55.21 +110.15,68.3,54.69 +112.57,69.09,55.37 +114.21,67.08,56.53 +115.31,66.84,56.29 +116.28,67.15,56.91 +116.41,67.47,57.26 +113.92,67.03,57.28 +113.45,67.09,56.84 +115.21,67.05,57.54 +113.4,66.58,57.12 +114.32,67.8,57.79 +115.0,67.91,58.37 +114.71,69.2,57.99 +112.44,67.93,55.77 +109.06,67.45,55.72 +110.3,68.38,56.84 +109.58,67.76,57.48 +110.38,68.4,58.08 +110.78,69.75,59.04 +111.31,69.19,58.69 +110.78,69.79,58.78 +109.5,69.5,59.46 +112.12,68.78,60.07 +111.6,69.43,60.54 +111.79,69.04,60.16 +110.21,68.7,58.82 +111.86,69.27,59.69 +111.04,69.26,59.93 +111.73,69.03,60.97 +113.77,69.48,60.88 +113.76,69.47,60.53 +115.5,70.48,61.49 +119.08,70.48,62.61 +115.28,70.05,63.43 +114.55,69.96,62.71 +119.27,70.37,63.51 +120.53,70.13,62.5 +119.5,69.97,62.57 +121.18,70.73,62.24 +122.57,71.36,62.8 +122.0,65.24,61.96 +120.92,67.4,62.28 +121.06,68.01,61.97 +120.57,68.2,61.34 +116.77,68.34,62.18 +116.11,70.02,61.87 +115.72,69.44,61.07 +112.34,69.03,59.74 +114.175,70.02,60.68 +113.69,71.05,60.55 +117.29,71.98,61.8 +118.78,72.45,61.46 +119.3,72.19,61.99 +117.75,72.24,62.64 +118.88,71.96,61.96 +118.03,71.83,62.19 +117.81,72.02,62.18 +118.3,71.78,61.39 +117.34,72.05,61.37 +116.28,71.89,61.22 +115.2,71.08,59.55 +119.03,72.11,61.75 +118.28,70.38,61.89 +118.23,69.75,62.16 +115.62,69.31,61.18 +116.17,69.37,61.87 +113.18,68.61,59.82 +112.48,68.14,59.92 +110.49,69.13,59.98 +111.34,69.52,60.35 +108.98,68.56,59.515 +106.03,67.58,58.62 +107.33,68.03,59.54 +107.23,68.87,59.99 +108.61,69.21,60.34 +108.03,69.06,60.32 +106.82,69.18,60.19 +108.74,69.64,61.13 +107.32,69.3,60.82 +105.26,68.45,60.03 +105.35,67.13,58.26 +102.71,66.39,58.65 
+100.7,65.43,58.13 +96.45,64.11,56.69 +96.96,64.25,56.63 +98.53,64.37,57.82 +99.96,64.91,59.46 +97.39,63.37,57.87 +99.52,63.11,58.98 +97.13,61.59,58.0 +96.66,61.13,58.55 +96.79,60.36,56.92 +96.3,60.82,59.03 +101.42,62.04,59.17 +99.44,62.42,57.71 +99.99,63.16,58.61 +93.42,64.8,57.63 +94.09,64.74,59.285 +97.34,66.77,60.77 +96.43,66.85,61.4 +94.48,64.32,60.695 +96.35,64.88,59.53 +96.6,64.25,58.29 +94.02,62.82,54.49 +95.01,62.09,54.14 +94.99,62.24,54.42 +94.27,60.97,55.14 +93.7,60.52,54.92 +93.99,61.78,55.86 +96.64,63.42,56.41 +98.12,65.05,57.63 +96.26,64.78,56.96 +96.04,66.0,57.67 +96.88,66.75,58.87 +94.69,70.78,58.46 +96.1,72.84,58.11 +96.76,74.06,58.75 +96.91,74.86,58.34 +96.69,73.49,58.21 +100.53,71.19,60.04 +100.75,71.28,59.56 +101.5,71.25,59.04 +103.01,70.95,58.7 +101.87,71.01,58.0 +101.03,71.1,57.6 +101.12,71.48,57.07 +101.17,71.22,57.52 +102.26,71.2,57.59 +102.52,71.83,58.65 +104.58,71.97,59.08 +105.97,72.24,59.67 +105.8,72.83,59.55 +105.92,72.59,59.7 +105.91,73.12,59.1 +106.72,73.71,59.38 +106.13,73.15,58.83 +105.67,72.59,58.36 +105.19,73.37,58.96 +107.68,74.09,59.55 +109.56,74.89,60.01 +108.99,75.7,59.7 +109.99,76.11,61.02 +111.12,76.32,60.25 +109.81,75.71,60.04 +110.96,76.09,60.83 +108.54,74.99,61.17 +108.66,75.24,61.04 +109.02,74.88,60.9 +110.44,75.04,59.5 +112.04,75.37,60.21 +112.1,75.31,60.13 +109.85,75.64,60.51 +107.48,75.69,60.89 +106.91,75.97,60.9 +107.13,75.55,60.9 +105.97,74.99,60.64 +105.68,75.56,57.68 +105.08,75.51,57.77 +104.35,75.9,57.72 +97.82,76.04,56.9 +94.83,75.34,56.42 +93.74,75.19,56.23 +93.64,76.0,57.36 +95.18,74.96,56.25 +94.19,74.22,56.39 +93.24,74.25,56.25 +92.72,70.54,56.31 +92.79,70.82,56.64 +93.42,71.05,57.49 +92.51,70.07,56.23 +90.34,71.11,56.3 +90.52,70.62,55.82 +93.88,70.83,55.53 +93.49,69.89,54.88 +94.56,69.46,54.8 +94.2,68.72,54.55 +95.22,68.75,54.62 +96.43,68.78,54.6 +97.9,69.68,55.44 +99.62,69.35,55.15 +100.41,69.4,55.29 +100.35,69.5,55.15 +99.86,69.27,54.89 +98.46,69.06,54.82 +97.72,68.8,54.62 +97.92,68.47,54.61 +98.63,68.77,55.59 +99.03,68.16,55.3 +98.94,69.05,55.22 +99.65,68.56,55.58 +98.83,67.45,54.865 +97.34,66.82,55.04 +97.46,67.24,55.57 +97.14,67.54,55.35 +97.55,67.8,55.53 +95.33,67.33,55.31 +95.1,68.35,55.38 +95.91,67.81,55.81 +95.55,67.43,55.61 +96.1,68.01,56.13 +93.4,64.73,54.68 +92.04,63.08,53.69 +93.59,63.69,54.85 +94.4,64.55,56.74 +95.6,65.97,57.12 +95.89,66.01,56.99 +94.99,64.77,56.77 +95.53,65.3,56.75 +95.94,65.05,56.91 +96.68,66.38,56.51 +96.98,66.62,56.32 +97.42,67.4,57.48 +96.87,67.46,56.48 +98.79,67.58,57.59 +98.78,67.4,57.41 +99.83,67.55,56.92 +99.87,67.5,56.76 +99.96,67.93,57.54 +99.43,67.55,57.6 +98.66,68.25,57.9 +97.34,68.09,57.95 +96.67,68.42,58.31 +102.95,69.26,57.85 +104.34,69.58,58.21 +104.21,69.38,58.05 +106.05,69.63,57.63 +104.48,68.84,56.73 +105.79,69.29,55.94 +105.87,70.24,55.42 +107.48,73.5,55.9 +108.37,73.93,55.36 +108.81,74.28,55.2 +108.0,74.28,55.62 +107.93,75.52,55.47 +108.18,74.54,55.47 +109.48,75.44,55.25 +109.38,75.58,55.37 +109.22,75.68,55.8 +109.08,75.99,55.53 +109.36,76.34,54.94 +108.51,76.49,55.85 +108.85,76.99,56.4 +108.03,77.12,57.09 +107.57,77.18,57.29 +106.94,77.2,57.29 +106.82,77.29,56.8 +106.0,77.51,56.4 +106.1,76.99,56.23 +106.73,76.8,56.31 +107.73,77.95,56.18 +107.7,78.32,56.02 +108.36,78.08,56.32 +105.52,77.37,55.3 +103.13,76.65,54.35 +105.44,77.23,54.71 +107.95,76.09,53.98 +111.77,75.47,53.9 +115.57,76.04,54.11 +114.92,75.63,53.74 +113.58,75.76,53.01 +113.57,75.21,53.3 +113.55,75.73,53.98 +114.62,76.19,54.39 +112.71,76.11,54.43 +112.88,75.95,54.04 +113.09,76.32,54.19 +113.95,76.79,53.98 
+112.18,77.21,53.45 +113.05,76.28,54.14 +112.52,75.25,53.84 +113.0,74.42,53.53 +113.05,74.35,53.35 +113.89,74.64,53.14 +114.06,74.48,53.46 +116.05,74.67,53.3 +116.3,73.5,52.92 +117.34,73.76,53.16 +116.98,73.06,52.95 +117.63,73.58,53.08 +117.55,73.13,52.76 +117.47,73.8,52.61 +117.12,73.8,53.15 +117.06,73.57,53.59 +116.6,73.62,53.63 +117.65,74.49,54.18 +118.25,74.16,53.67 +115.59,73.58,53.63 +114.48,73.48,53.59 +113.72,72.83,53.53 +113.54,72.58,53.07 +111.49,72.32,52.5 +111.59,71.57,52.98 +109.83,71.29,51.77 +108.84,75.9,52.75 +110.41,77.71,54.49 +111.06,78.56,54.62 +110.88,78.96,54.58 +107.79,79.19,53.57 +108.43,80.38,53.93 +105.71,80.6,54.22 +107.11,81.8,54.59 +109.99,80.51,55.44 +109.95,80.35,55.85 +110.06,79.98,55.77 +111.73,79.83,56.1 +111.8,80.31,57.12 +111.23,80.26,57.59 +111.79,80.98,57.43 +111.57,80.86,57.59 +111.46,81.11,58.17 +110.52,80.25,57.97 +109.49,79.19,58.51 +109.9,79.5,57.21 +109.11,80.92,57.5 +109.95,82.22,57.44 +111.03,83.27,58.76 +112.12,83.3,58.65 +113.95,82.79,58.75 +113.3,82.6,58.77 +115.19,83.24,59.31 +115.19,82.9,58.75 +115.82,83.46,57.71 +115.97,83.4,57.66 +116.64,83.93,57.65 +116.95,83.76,57.7 +117.06,84.0,57.44 +116.29,83.72,57.11 +116.52,83.41,57.01 +117.26,83.52,56.86 +116.76,82.86,56.35 +116.73,82.87,56.32 +115.82,82.89,55.52 +116.15,83.6,55.35 +116.02,83.49,55.99 +116.61,82.64,56.46 +117.91,82.89,57.13 +118.99,83.02,58.2 +119.11,82.63,57.88 +119.75,82.88,58.1 +119.25,82.18,58.03 +119.04,82.27,57.85 +120.0,80.73,58.0 +119.99,81.65,58.45 +119.78,81.86,57.89 +120.0,82.36,57.66 +120.08,82.44,57.76 +119.97,84.35,58.44 +121.88,85.29,58.7 +121.94,83.36,58.46 +121.95,82.98,56.12 +121.63,81.7,55.9 +121.35,80.71,55.22 +128.75,80.03,53.9 +128.53,81.0,53.87 +129.08,81.6,55.06 +130.29,81.73,55.73 +131.53,77.34,55.24 +132.04,78.25,55.22 +132.42,77.81,55.81 +132.12,78.37,56.22 +133.29,78.48,56.11 +135.02,78.68,56.58 +135.51,79.4,56.86 +135.345,78.66,56.73 +135.72,79.31,57.35 +136.7,80.15,57.54 +137.11,79.65,57.57 +136.53,79.36,57.64 +136.66,80.27,57.48 +136.93,79.28,56.78 +136.99,78.97,56.87 +139.79,79.98,57.14 +138.96,80.02,57.12 +139.78,80.55,57.1 +139.34,79.97,56.68 +139.52,79.66,56.2 +139.0,80.2,55.74 +138.68,81.37,55.19 +139.14,82.1,54.53 +139.2,81.65,54.63 +138.99,83.36,54.27 +140.46,85.24,54.54 +140.69,85.15,54.8 +139.99,84.72,55.78 +141.46,84.3,55.81 +139.84,83.76,55.54 +141.42,83.59,55.89 +140.92,83.74,55.85 +140.64,83.67,56.81 +140.88,84.0,57.23 +143.8,84.0,57.35 +144.12,84.13,57.54 +143.93,84.87,58.16 +143.66,86.22,58.39 +143.7,84.83,58.44 +144.77,84.52,58.32 +144.02,83.83,58.22 +143.66,84.2,57.92 +143.34,84.25,58.02 +143.17,83.71,57.95 +141.63,83.45,57.88 +141.8,82.84,57.58 +141.05,82.34,57.51 +141.83,83.08,58.08 +141.2,82.64,58.35 +140.68,83.37,59.04 +142.44,84.1,60.08 +142.27,83.72,60.61 +143.64,84.72,61.11 +144.53,85.39,60.96 +143.68,85.38,61.56 +143.79,86.07,61.3 +143.65,85.97,60.06 +146.58,86.16,60.18 +147.51,85.92,60.5 +147.06,86.37,60.59 +146.53,86.1,60.83 +148.96,84.44,60.95 +153.01,83.59,60.94 +153.99,84.77,60.98 +153.26,85.77,60.66 +153.95,85.36,60.27 +156.1,84.21,59.93 +155.7,84.48,60.45 +155.47,83.7,59.98 +150.25,81.85,59.73 +152.54,80.83,59.82 +153.06,80.83,61.36 +153.99,82.93,61.23 +153.8,82.11,61.15 +153.34,82.22,61.89 +153.87,82.27,62.9 +153.61,81.86,63.3 +153.67,82.83,63.26 +152.76,83.57,63.61 +153.18,85.64,63.75 +155.45,86.62,64.57 +153.93,87.31,64.27 +154.45,87.46,64.16 +155.37,86.18,63.5 +154.99,86.65,62.24 +148.98,86.17,62.19 +145.42,86.11,61.29 +146.59,86.04,60.92 +145.16,84.87,60.27 +144.29,84.45,60.09 +142.27,84.72,60.14 
+146.34,86.2,60.9 +145.01,85.74,59.86 +145.87,86.24,59.96 +145.63,87.36,59.51 +146.28,88.64,59.81 +145.82,88.42,59.64 +143.73,87.72,58.96 +145.83,88.13,59.18 +143.68,86.8,58.36 +144.02,86.74,58.31 +143.5,86.68,58.25 +144.09,86.83,57.94 +142.73,85.96,57.6 +144.18,87.31,58.04 +145.06,87.23,57.81 +145.53,87.65,57.9 +145.74,88.65,58.54 +147.77,88.33,58.38 +149.04,88.61,58.76 +149.56,88.28,58.33 +150.08,88.45,58.21 +151.02,89.78,58.11 +150.34,89.96,58.03 +150.27,90.52,57.98 +152.09,90.67,58.02 +152.74,91.39,58.55 +153.46,91.84,57.94 +150.56,92.21,59.5 +149.5,91.01,54.0 +148.73,90.68,53.98 +158.59,90.43,54.73 +157.14,90.43,55.43 +155.57,90.4,55.68 +156.39,90.37,55.44 +158.81,89.2,55.63 +160.08,88.58,54.52 +161.06,88.51,53.74 +155.32,86.99,53.07 +157.48,87.48,53.18 +159.85,88.6,53.22 +161.6,87.92,53.15 +160.95,88.19,53.5 +157.86,87.13,53.04 +157.5,87.37,52.7 +157.21,87.2,53.15 +159.78,86.41,54.45 +159.98,86.21,54.08 +159.27,86.51,53.94 +159.86,86.88,54.36 +161.47,87.3,54.4 +162.91,86.94,54.1 +163.35,87.66,54.52 +164.0,88.12,54.86 +164.05,87.87,54.93 +162.08,86.66,55.13 +161.91,85.72,54.31 +161.26,86.35,53.47 +158.63,85.12,53.49 +161.5,87.01,54.02 +160.86,87.74,53.54 +159.65,85.97,54.29 +158.28,84.58,54.53 +159.88,85.48,54.67 +158.67,85.48,54.69 +158.73,85.84,54.62 +156.07,85.65,55.15 +153.39,84.99,55.01 +151.89,84.29,55.09 +150.55,83.5,54.95 +153.14,83.02,55.13 +154.23,84.1,54.99 +153.28,83.69,54.5 +154.12,84.87,53.71 +153.81,85.84,53.81 +154.48,85.69,53.99 +153.48,85.64,53.93 +155.39,86.0,54.6 +155.3,89.44,55.17 +155.84,89.1,55.02 +155.9,89.08,55.42 +156.55,89.26,55.64 +156.0,89.8,55.97 +156.99,89.93,55.72 +159.88,89.36,54.91 +160.47,88.88,54.51 +159.76,89.09,55.21 +155.98,89.65,55.4 +156.25,90.0,54.57 +156.17,89.94,54.27 +157.1,90.23,54.28 +156.41,90.04,54.16 +157.41,90.28,54.91 +163.05,91.19,54.88 +166.72,90.37,55.17 +169.04,90.54,54.84 +166.89,90.56,55.13 +168.11,90.02,54.87 +172.5,94.25,56.03 +174.25,92.43,56.57 +174.81,92.11,57.22 +176.24,92.66,57.91 +175.88,91.61,57.36 +174.67,91.07,57.04 +173.97,91.37,56.64 +171.34,91.02,56.93 +169.08,90.39,56.7 +171.1,90.97,57.24 +170.15,90.95,56.93 +169.98,92.33,56.81 +173.14,92.45,57.26 +174.96,91.83,57.14 +174.97,92.36,56.8 +174.09,92.88,55.91 +173.07,94.53,56.66 +169.48,94.17,57.51 +171.85,94.11,57.82 +171.05,93.03,57.32 +169.8,93.63,58.76 +169.64,90.66,59.34 +169.01,91.29,59.28 +169.32,92.8,59.14 +169.37,92.52,58.61 +172.67,92.33,59.07 +171.7,93.37,59.27 +172.27,93.94,59.49 +172.22,92.2,59.7 +173.97,93.15,58.29 +176.42,94.49,58.03 +174.54,93.28,58.01 +174.35,92.1,57.73 +175.01,91.62,57.58 +175.01,90.76,57.3 +170.57,90.67,57.14 +170.6,90.8,57.27 +171.08,90.57,57.81 +169.23,90.34,57.43 +172.26,90.55,57.63 +172.23,89.91,58.71 +173.03,90.66,58.93 +175.0,91.88,59.61 +174.35,92.82,59.31 +174.33,92.12,59.18 +174.29,92.38,59.82 +175.28,93.55,60.0 +177.09,96.57,60.4 +176.19,95.86,60.56 +179.1,97.28,60.66 +179.26,97.5,61.09 +178.46,97.8,61.26 +177.0,97.33,61.41 +177.04,96.76,61.69 +174.22,95.84,60.83 +171.11,97.68,60.55 +171.51,99.0,57.99 +167.96,99.18,57.02 +166.97,99.8,57.19 +167.43,99.46,56.81 +167.78,99.12,56.0 +160.5,103.87,55.77 +156.49,101.06,54.69 +163.03,102.76,55.61 +159.54,102.63,54.46 From 323188049ee53a3cdd399d5088246796b0eeb999 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 9 Sep 2019 08:21:49 -0400 Subject: [PATCH 157/329] update --- tf2.0/.gitignore | 6 +- tf2.0/extra_reading.txt | 5 +- tf2.0/rl_trader.py | 411 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 418 insertions(+), 4 deletions(-) create mode 100644 tf2.0/rl_trader.py diff 
--git a/tf2.0/.gitignore b/tf2.0/.gitignore index 8369960b..5d414047 100644 --- a/tf2.0/.gitignore +++ b/tf2.0/.gitignore @@ -1,3 +1,3 @@ -rl_trader_working.py -rl_trader_models -rl_trader_rewards +rl_trader_working*.py +*rl_trader_models +*rl_trader_rewards diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt index 7252c4e0..1542aa56 100644 --- a/tf2.0/extra_reading.txt +++ b/tf2.0/extra_reading.txt @@ -18,4 +18,7 @@ On the Practical Computational Power of Finite Precision RNNs for Language Recog https://arxiv.org/abs/1805.04908 Massive Exploration of Neural Machine Translation Architectures -https://arxiv.org/abs/1703.03906 \ No newline at end of file +https://arxiv.org/abs/1703.03906 + +Practical Deep Reinforcement Learning Approach for Stock Trading +https://arxiv.org/abs/1811.07522 \ No newline at end of file diff --git a/tf2.0/rl_trader.py b/tf2.0/rl_trader.py new file mode 100644 index 00000000..694d45ee --- /dev/null +++ b/tf2.0/rl_trader.py @@ -0,0 +1,411 @@ +import numpy as np +import pandas as pd + +from tensorflow.keras.models import Model +from tensorflow.keras.layers import Dense, Input +from tensorflow.keras.optimizers import Adam + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + +from sklearn.preprocessing import StandardScaler + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +def mlp(input_dim, n_action, n_hidden_layers=1, hidden_dim=32): + """ A multi-layer perceptron """ + + # input layer + i = Input(shape=(input_dim,)) + x = i + + # hidden layers + for _ in range(n_hidden_layers): + x = Dense(hidden_dim, activation='relu')(x) + + # final layer + x = Dense(n_action)(x) + + # make the model + model = Model(i, x) + + model.compile(loss='mse', optimizer='adam') + print((model.summary())) + return model + + + + +class MultiStockEnv: + """ + A 3-stock 
trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. 
[2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = mlp(state_size, action_size) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state) + return np.argmax(act_values[0]) # returns action + + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if self.memory.size < batch_size: + return + + # sample a batch of data from the replay memory + minibatch = self.memory.sample_batch(batch_size) + states = minibatch['s'] + actions = minibatch['a'] + rewards = minibatch['r'] + next_states = minibatch['s2'] + done = minibatch['d'] + + # Calculate the tentative target: Q(s',a) + target = rewards + self.gamma * np.amax(self.model.predict(next_states), axis=1) + + # The value of terminal states is zero + # so set the target to be the reward only + target[done] = rewards[done] + + # With the Keras API, the target (usually) must have the same + # shape as the predictions. + # However, we only need to update the network for the actions + # which were actually taken. + # We can accomplish this by setting the target to be equal to + # the prediction for all values. + # Then, only change the targets for the actions taken. 
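+    # Illustrative sketch with made-up numbers (purely an assumption, not
+    # taken from the data): suppose batch_size = 2, 3 actions,
+    # actions = [1, 2], target = [1.0, -0.4], and the current predictions are
+    #   [[0.1, 0.5, 0.2],
+    #    [0.3, 0.0, 0.9]]
+    # then the target_full computed below becomes
+    #   [[0.1, 1.0, 0.2],
+    #    [0.3, 0.0, -0.4]]
+    # so the MSE gradient is nonzero only for the actions actually taken.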
+ # Q(s,a) + target_full = self.model.predict(states) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + self.model.train_on_batch(states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
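+    # (with epsilon = 1 every test action would be chosen at random)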
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/dqn.h5') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/dqn.h5') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) From 0e97857f47c8a59c3a1707ef9589dc2bdffe3139 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 12 Sep 2019 00:00:13 -0400 Subject: [PATCH 158/329] update --- rl/extra_reading.txt | 3 + rl/linear_rl_trader.py | 385 +++++++++++++++++++++++++++++++++++++++++ rl/plot_rl_rewards.py | 16 ++ 3 files changed, 404 insertions(+) create mode 100644 rl/linear_rl_trader.py create mode 100644 rl/plot_rl_rewards.py diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index 81f52a48..6db18878 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -1,6 +1,9 @@ Hacking Google reCAPTCHA v3 using Reinforcement Learning https://arxiv.org/pdf/1903.01003.pdf +Practical Deep Reinforcement Learning Approach for Stock Trading +https://arxiv.org/abs/1811.07522 + Reinforcement Learning: A Tutorial Survey and Recent Advances - Abhijit Gosavi http://web.mst.edu/~gosavia/joc.pdf diff --git a/rl/linear_rl_trader.py b/rl/linear_rl_trader.py new file mode 100644 index 00000000..9bda27e7 --- /dev/null +++ b/rl/linear_rl_trader.py @@ -0,0 +1,385 @@ +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + +from sklearn.preprocessing import StandardScaler + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('../tf2.0/aapl_msi_sbux.csv') + return df.values + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + +class LinearModel: + """ A linear regression model """ + def __init__(self, input_dim, n_action): + self.W = np.random.randn(input_dim, n_action) / np.sqrt(input_dim) + self.b = np.zeros(n_action) + + # momentum terms + self.vW = 0 + self.vb = 0 + + self.losses = [] + + def predict(self, X): + # make sure X is N x D + assert(len(X.shape) == 2) + return X.dot(self.W) + self.b + + def sgd(self, X, Y, learning_rate=0.01, momentum=0.9): + # make sure X is N x D + assert(len(X.shape) == 2) + + # the loss values are 2-D + # normally we would divide by N only + # but now we divide by N x K + num_values = np.prod(Y.shape) + + # do one step of gradient descent + # we multiply by 2 to get the 
exact gradient + # (not adjusting the learning rate) + # i.e. d/dx (x^2) --> 2x + Yhat = self.predict(X) + gW = 2 * X.T.dot(Yhat - Y) / num_values + gb = 2 * (Yhat - Y).sum(axis=0) / num_values + + # update momentum terms + self.vW = momentum * self.vW - learning_rate * gW + self.vb = momentum * self.vb - learning_rate * gb + + # update params + self.W += self.vW + self.b += self.vb + + mse = np.mean((Yhat - Y)**2) + self.losses.append(mse) + + def load_weights(self, filepath): + npz = np.load(filepath) + self.W = npz['W'] + self.b = npz['b'] + + def save_weights(self, filepath): + np.savez(filepath, W=self.W, b=self.b) + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. 
[2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = LinearModel(state_size, action_size) + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state) + return np.argmax(act_values[0]) # returns action + + + def train(self, state, action, reward, next_state, done): + if done: + target = reward + else: + target = reward + self.gamma * np.amax(self.model.predict(next_state), axis=1) + + target_full = self.model.predict(state) + target_full[0, action] = target + + # Run one training step + self.model.sgd(state, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.train(state, action, reward, next_state, done) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'linear_rl_trader_models' + rewards_folder = 'linear_rl_trader_rewards' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = 
pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! + # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/linear.npz') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/linear.npz') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + # plot losses + plt.plot(agent.model.losses) + plt.show() + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) diff --git a/rl/plot_rl_rewards.py b/rl/plot_rl_rewards.py new file mode 100644 index 00000000..e239b501 --- /dev/null +++ b/rl/plot_rl_rewards.py @@ -0,0 +1,16 @@ +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') +args = parser.parse_args() + +a = np.load(f'linear_rl_trader_rewards/{args.mode}.npy') + +print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") + +plt.hist(a, bins=20) +plt.title(args.mode) +plt.show() \ No newline at end of file From d7e2e14f903377a24b53ce002b24c5cfbc7279c5 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 17 Oct 2019 00:45:35 -0400 Subject: [PATCH 159/329] update --- unsupervised_class/kmeans.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/unsupervised_class/kmeans.py b/unsupervised_class/kmeans.py index b243b426..bb2e2659 100644 --- a/unsupervised_class/kmeans.py +++ b/unsupervised_class/kmeans.py @@ -33,13 +33,12 @@ def cost(X, R, M): def plot_k_means(X, K, max_iter=20, beta=3.0, show_plots=False): N, D = X.shape - M = np.zeros((K, D)) # R = np.zeros((N, K)) exponents = np.empty((N, K)) # initialize M to random - for k in range(K): - M[k] = X[np.random.choice(N)] + initial_centers = np.random.choice(N, K, replace=False) + M = X[initial_centers] costs = [] k = 0 From 0d268b346bdd64b99c9d7295892aaf9583b6bd37 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 9 Nov 2019 17:20:36 -0500 Subject: [PATCH 160/329] update --- supervised_class/dt.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/supervised_class/dt.py b/supervised_class/dt.py index f76299d3..b31933ff 100644 --- a/supervised_class/dt.py +++ b/supervised_class/dt.py @@ -25,10 +25,12 @@ def entropy(y): class TreeNode: - def __init__(self, depth=0, max_depth=None): - # print 'depth:', depth + def __init__(self, depth=1, max_depth=None): + print('depth:', depth) self.depth = depth self.max_depth = max_depth + if self.max_depth is not None and self.max_depth < self.depth: + raise Exception("depth > max_depth") def fit(self, X, Y): if len(Y) == 1 or len(set(Y)) == 1: From dafca78beb2f673500f7acbd0c78fee6d2fb33e5 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 23 Nov 2019 19:40:28 -0500 Subject: [PATCH 161/329] update --- nlp_class/sentiment.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/nlp_class/sentiment.py b/nlp_class/sentiment.py index 
c852ac2f..e5f58477 100644 --- a/nlp_class/sentiment.py +++ b/nlp_class/sentiment.py @@ -34,22 +34,13 @@ # load the reviews # data courtesy of http://www.cs.jhu.edu/~mdredze/datasets/sentiment/index2.html -positive_reviews = BeautifulSoup(open('electronics/positive.review').read()) +positive_reviews = BeautifulSoup(open('electronics/positive.review').read(), features="html5lib") positive_reviews = positive_reviews.findAll('review_text') -negative_reviews = BeautifulSoup(open('electronics/negative.review').read()) +negative_reviews = BeautifulSoup(open('electronics/negative.review').read(), features="html5lib") negative_reviews = negative_reviews.findAll('review_text') -# there are more positive reviews than negative reviews -# so let's take a random sample so we have balanced classes -# np.random.shuffle(positive_reviews) -# positive_reviews = positive_reviews[:len(negative_reviews)] -# we can also oversample the negative reviews -diff = len(positive_reviews) - len(negative_reviews) -idxs = np.random.choice(len(negative_reviews), size=diff) -extra = [negative_reviews[i] for i in idxs] -negative_reviews += extra # first let's just try to tokenize the text using nltk's tokenizer # let's take the first review for example: From 6059c2d0b67271513317d1fbd2ca19ab1fe3b301 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 29 Nov 2019 02:53:19 -0500 Subject: [PATCH 162/329] update --- nlp_class/cipher_evolve.py | 293 +++++++++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 nlp_class/cipher_evolve.py diff --git a/nlp_class/cipher_evolve.py b/nlp_class/cipher_evolve.py new file mode 100644 index 00000000..8cf7ecb8 --- /dev/null +++ b/nlp_class/cipher_evolve.py @@ -0,0 +1,293 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me + +import numpy as np +import matplotlib.pyplot as plt + +import string +import random +import re +import requests +import os + + +### create substitution cipher + +# one will act as the key, other as the value +letters1 = list(string.ascii_lowercase) +letters2 = list(string.ascii_lowercase) + +true_mapping = {} + +# shuffle second set of letters +random.shuffle(letters2) + +# populate map +for k, v in zip(letters1, letters2): + true_mapping[k] = v + + + +### the language model + +# initialize Markov matrix +M = np.ones((26, 26)) + +# initial state distribution +pi = np.zeros(26) + +# a function to update the Markov matrix +def update_transition(ch1, ch2): + # ord('a') = 97, ord('b') = 98, ... 
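+  # e.g. a (hypothetical) call update_transition('c', 'a') bumps M[2, 0],
+  # since ord('c') - 97 = 2 and ord('a') - 97 = 0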
+ i = ord(ch1) - 97 + j = ord(ch2) - 97 + M[i,j] += 1 + +# a function to update the initial state distribution +def update_pi(ch): + i = ord(ch) - 97 + pi[i] += 1 + +# get the log-probability of a word / token +def get_word_prob(word): + # print("word:", word) + i = ord(word[0]) - 97 + logp = np.log(pi[i]) + + for ch in word[1:]: + j = ord(ch) - 97 + logp += np.log(M[i, j]) # update prob + i = j # update j + + return logp + +# get the probability of a sequence of words +def get_sequence_prob(words): + # if input is a string, split into an array of tokens + if type(words) == str: + words = words.split() + + logp = 0 + for word in words: + logp += get_word_prob(word) + return logp + + +### create a markov model based on an English dataset +# is an edit of https://www.gutenberg.org/ebooks/2701 +# (I removed the front and back matter) + +# download the file +if not os.path.exists('moby_dick.txt'): + print("Downloading moby dick...") + r = requests.get('https://lazyprogrammer.me/course_files/moby_dick.txt') + with open('moby_dick.txt', 'w') as f: + f.write(r.content.decode()) + +# for replacing non-alpha characters +regex = re.compile('[^a-zA-Z]') + +# load in words +for line in open('moby_dick.txt'): + line = line.rstrip() + + # there are blank lines in the file + if line: + line = regex.sub(' ', line) # replace all non-alpha characters with space + + # split the tokens in the line and lowercase + tokens = line.lower().split() + + for token in tokens: + # update the model + + # first letter + ch0 = token[0] + update_pi(ch0) + + # other letters + for ch1 in token[1:]: + update_transition(ch0, ch1) + ch0 = ch1 + +# normalize the probabilities +pi /= pi.sum() +M /= M.sum(axis=1, keepdims=True) + + +### encode a message + +# this is a random excerpt from Project Gutenberg's +# The Adventures of Sherlock Holmes, by Arthur Conan Doyle +# https://www.gutenberg.org/ebooks/1661 + +original_message = '''I then lounged down the street and found, +as I expected, that there was a mews in a lane which runs down +by one wall of the garden. I lent the ostlers a hand in rubbing +down their horses, and received in exchange twopence, a glass of +half-and-half, two fills of shag tobacco, and as much information +as I could desire about Miss Adler, to say nothing of half a dozen +other people in the neighbourhood in whom I was not in the least +interested, but whose biographies I was compelled to listen to. +''' + +# Away they went, and I was just wondering whether I should not do well +# to follow them when up the lane came a neat little landau, the coachman +# with his coat only half-buttoned, and his tie under his ear, while all +# the tags of his harness were sticking out of the buckles. It hadn't +# pulled up before she shot out of the hall door and into it. I only +# caught a glimpse of her at the moment, but she was a lovely woman, with +# a face that a man might die for. + +# My cabby drove fast. I don't think I ever drove faster, but the others +# were there before us. The cab and the landau with their steaming horses +# were in front of the door when I arrived. I paid the man and hurried +# into the church. There was not a soul there save the two whom I had +# followed and a surpliced clergyman, who seemed to be expostulating with +# them. They were all three standing in a knot in front of the altar. I +# lounged up the side aisle like any other idler who has dropped into a +# church. 
Suddenly, to my surprise, the three at the altar faced round to +# me, and Godfrey Norton came running as hard as he could towards me. + + + +# a function to encode a message +def encode_message(msg): + # downcase + msg = msg.lower() + + # replace non-alpha characters + msg = regex.sub(' ', msg) + + # make the encoded message + coded_msg = [] + for ch in msg: + coded_ch = ch # could just be a space + if ch in true_mapping: + coded_ch = true_mapping[ch] + coded_msg.append(coded_ch) + + return ''.join(coded_msg) + + +encoded_message = encode_message(original_message) + + +# a function to decode a message +def decode_message(msg, word_map): + decoded_msg = [] + for ch in msg: + decoded_ch = ch # could just be a space + if ch in word_map: + decoded_ch = word_map[ch] + decoded_msg.append(decoded_ch) + + return ''.join(decoded_msg) + + + +### run an evolutionary algorithm to decode the message + +# this is our initialization point +dna_pool = [] +for _ in range(20): + dna = list(string.ascii_lowercase) + random.shuffle(dna) + dna_pool.append(dna) + + +def evolve_offspring(dna_pool, n_children): + # make n_children per offspring + offspring = [] + + for dna in dna_pool: + for _ in range(n_children): + copy = dna.copy() + j = np.random.randint(len(copy)) + k = np.random.randint(len(copy)) + + # switch + tmp = copy[j] + copy[j] = copy[k] + copy[k] = tmp + offspring.append(copy) + + return offspring + dna_pool + + + +num_iters = 1000 +scores = np.zeros(num_iters) +prev_score = float('-inf') +best_dna = None +best_map = None +best_score = float('-inf') +for i in range(num_iters): + if i > 0: + # get offspring from the current dna pool + dna_pool = evolve_offspring(dna_pool, 3) + + # calculate score for each dna + dna2score = {} + for dna in dna_pool: + # populate map + current_map = {} + for k, v in zip(letters1, dna): + current_map[k] = v + + decoded_message = decode_message(encoded_message, current_map) + score = get_sequence_prob(decoded_message) + + # store it + # needs to be a string to be a dict key + dna2score[''.join(dna)] = score + + # record the best so far + if score > best_score: + best_dna = dna + best_map = current_map + best_score = score + + # average score for this generation + scores[i] = np.mean(list(dna2score.values())) + + # keep the best 3 dna + # also turn them back into list of single chars + sorted_dna = sorted(dna2score.items(), key=lambda x: x[1], reverse=True) + dna_pool = [list(k) for k, v in sorted_dna[:5]] + + if i % 200 == 0: + print("iter:", i, "score:", scores[i], "best so far:", best_score) + + + + + +# use best score +decoded_message = decode_message(encoded_message, best_map) + +print("LL of decoded message:", get_sequence_prob(decoded_message)) +print("LL of true message:", get_sequence_prob(regex.sub(' ', original_message.lower()))) + + +# which letters are wrong? 
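+# (true_mapping maps each true letter to its cipher letter v; decoding v with
+# best_map should give the true letter back)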
+for true, v in true_mapping.items(): + pred = best_map[v] + if true != pred: + print("true: %s, pred: %s" % (true, pred)) + + +# print the final decoded message +print("Decoded message:\n", decoded_message) + +print("\nTrue message:\n", original_message) + +plt.plot(scores) +plt.show() + + + + + From 48b5b3e97f1f80b6493025fbbc52715b77d5c6bc Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 12 Dec 2019 22:37:55 -0500 Subject: [PATCH 163/329] update --- nlp_class/cipher_placeholder.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 nlp_class/cipher_placeholder.py diff --git a/nlp_class/cipher_placeholder.py b/nlp_class/cipher_placeholder.py new file mode 100644 index 00000000..0c08b818 --- /dev/null +++ b/nlp_class/cipher_placeholder.py @@ -0,0 +1,23 @@ +# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python +# https://www.udemy.com/data-science-natural-language-processing-in-python + +# Author: http://lazyprogrammer.me + +# Get the data from here: +# https://lazyprogrammer.me/course_files/moby_dick.txt + +### encode a message + +# this is a random excerpt from Project Gutenberg's +# The Adventures of Sherlock Holmes, by Arthur Conan Doyle +# https://www.gutenberg.org/ebooks/1661 + +original_message = '''I then lounged down the street and found, +as I expected, that there was a mews in a lane which runs down +by one wall of the garden. I lent the ostlers a hand in rubbing +down their horses, and received in exchange twopence, a glass of +half-and-half, two fills of shag tobacco, and as much information +as I could desire about Miss Adler, to say nothing of half a dozen +other people in the neighbourhood in whom I was not in the least +interested, but whose biographies I was compelled to listen to. +''' \ No newline at end of file From 04c44b3691e1a475cb72ea3a8e8633d4ed25a6be Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 13 Dec 2019 17:18:45 -0500 Subject: [PATCH 164/329] update --- nlp_class/cipher_evolve.py | 293 ------------------------------------- 1 file changed, 293 deletions(-) delete mode 100644 nlp_class/cipher_evolve.py diff --git a/nlp_class/cipher_evolve.py b/nlp_class/cipher_evolve.py deleted file mode 100644 index 8cf7ecb8..00000000 --- a/nlp_class/cipher_evolve.py +++ /dev/null @@ -1,293 +0,0 @@ -# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python -# https://www.udemy.com/data-science-natural-language-processing-in-python - -# Author: http://lazyprogrammer.me - -import numpy as np -import matplotlib.pyplot as plt - -import string -import random -import re -import requests -import os - - -### create substitution cipher - -# one will act as the key, other as the value -letters1 = list(string.ascii_lowercase) -letters2 = list(string.ascii_lowercase) - -true_mapping = {} - -# shuffle second set of letters -random.shuffle(letters2) - -# populate map -for k, v in zip(letters1, letters2): - true_mapping[k] = v - - - -### the language model - -# initialize Markov matrix -M = np.ones((26, 26)) - -# initial state distribution -pi = np.zeros(26) - -# a function to update the Markov matrix -def update_transition(ch1, ch2): - # ord('a') = 97, ord('b') = 98, ... 
- i = ord(ch1) - 97 - j = ord(ch2) - 97 - M[i,j] += 1 - -# a function to update the initial state distribution -def update_pi(ch): - i = ord(ch) - 97 - pi[i] += 1 - -# get the log-probability of a word / token -def get_word_prob(word): - # print("word:", word) - i = ord(word[0]) - 97 - logp = np.log(pi[i]) - - for ch in word[1:]: - j = ord(ch) - 97 - logp += np.log(M[i, j]) # update prob - i = j # update j - - return logp - -# get the probability of a sequence of words -def get_sequence_prob(words): - # if input is a string, split into an array of tokens - if type(words) == str: - words = words.split() - - logp = 0 - for word in words: - logp += get_word_prob(word) - return logp - - -### create a markov model based on an English dataset -# is an edit of https://www.gutenberg.org/ebooks/2701 -# (I removed the front and back matter) - -# download the file -if not os.path.exists('moby_dick.txt'): - print("Downloading moby dick...") - r = requests.get('https://lazyprogrammer.me/course_files/moby_dick.txt') - with open('moby_dick.txt', 'w') as f: - f.write(r.content.decode()) - -# for replacing non-alpha characters -regex = re.compile('[^a-zA-Z]') - -# load in words -for line in open('moby_dick.txt'): - line = line.rstrip() - - # there are blank lines in the file - if line: - line = regex.sub(' ', line) # replace all non-alpha characters with space - - # split the tokens in the line and lowercase - tokens = line.lower().split() - - for token in tokens: - # update the model - - # first letter - ch0 = token[0] - update_pi(ch0) - - # other letters - for ch1 in token[1:]: - update_transition(ch0, ch1) - ch0 = ch1 - -# normalize the probabilities -pi /= pi.sum() -M /= M.sum(axis=1, keepdims=True) - - -### encode a message - -# this is a random excerpt from Project Gutenberg's -# The Adventures of Sherlock Holmes, by Arthur Conan Doyle -# https://www.gutenberg.org/ebooks/1661 - -original_message = '''I then lounged down the street and found, -as I expected, that there was a mews in a lane which runs down -by one wall of the garden. I lent the ostlers a hand in rubbing -down their horses, and received in exchange twopence, a glass of -half-and-half, two fills of shag tobacco, and as much information -as I could desire about Miss Adler, to say nothing of half a dozen -other people in the neighbourhood in whom I was not in the least -interested, but whose biographies I was compelled to listen to. -''' - -# Away they went, and I was just wondering whether I should not do well -# to follow them when up the lane came a neat little landau, the coachman -# with his coat only half-buttoned, and his tie under his ear, while all -# the tags of his harness were sticking out of the buckles. It hadn't -# pulled up before she shot out of the hall door and into it. I only -# caught a glimpse of her at the moment, but she was a lovely woman, with -# a face that a man might die for. - -# My cabby drove fast. I don't think I ever drove faster, but the others -# were there before us. The cab and the landau with their steaming horses -# were in front of the door when I arrived. I paid the man and hurried -# into the church. There was not a soul there save the two whom I had -# followed and a surpliced clergyman, who seemed to be expostulating with -# them. They were all three standing in a knot in front of the altar. I -# lounged up the side aisle like any other idler who has dropped into a -# church. 
Suddenly, to my surprise, the three at the altar faced round to -# me, and Godfrey Norton came running as hard as he could towards me. - - - -# a function to encode a message -def encode_message(msg): - # downcase - msg = msg.lower() - - # replace non-alpha characters - msg = regex.sub(' ', msg) - - # make the encoded message - coded_msg = [] - for ch in msg: - coded_ch = ch # could just be a space - if ch in true_mapping: - coded_ch = true_mapping[ch] - coded_msg.append(coded_ch) - - return ''.join(coded_msg) - - -encoded_message = encode_message(original_message) - - -# a function to decode a message -def decode_message(msg, word_map): - decoded_msg = [] - for ch in msg: - decoded_ch = ch # could just be a space - if ch in word_map: - decoded_ch = word_map[ch] - decoded_msg.append(decoded_ch) - - return ''.join(decoded_msg) - - - -### run an evolutionary algorithm to decode the message - -# this is our initialization point -dna_pool = [] -for _ in range(20): - dna = list(string.ascii_lowercase) - random.shuffle(dna) - dna_pool.append(dna) - - -def evolve_offspring(dna_pool, n_children): - # make n_children per offspring - offspring = [] - - for dna in dna_pool: - for _ in range(n_children): - copy = dna.copy() - j = np.random.randint(len(copy)) - k = np.random.randint(len(copy)) - - # switch - tmp = copy[j] - copy[j] = copy[k] - copy[k] = tmp - offspring.append(copy) - - return offspring + dna_pool - - - -num_iters = 1000 -scores = np.zeros(num_iters) -prev_score = float('-inf') -best_dna = None -best_map = None -best_score = float('-inf') -for i in range(num_iters): - if i > 0: - # get offspring from the current dna pool - dna_pool = evolve_offspring(dna_pool, 3) - - # calculate score for each dna - dna2score = {} - for dna in dna_pool: - # populate map - current_map = {} - for k, v in zip(letters1, dna): - current_map[k] = v - - decoded_message = decode_message(encoded_message, current_map) - score = get_sequence_prob(decoded_message) - - # store it - # needs to be a string to be a dict key - dna2score[''.join(dna)] = score - - # record the best so far - if score > best_score: - best_dna = dna - best_map = current_map - best_score = score - - # average score for this generation - scores[i] = np.mean(list(dna2score.values())) - - # keep the best 3 dna - # also turn them back into list of single chars - sorted_dna = sorted(dna2score.items(), key=lambda x: x[1], reverse=True) - dna_pool = [list(k) for k, v in sorted_dna[:5]] - - if i % 200 == 0: - print("iter:", i, "score:", scores[i], "best so far:", best_score) - - - - - -# use best score -decoded_message = decode_message(encoded_message, best_map) - -print("LL of decoded message:", get_sequence_prob(decoded_message)) -print("LL of true message:", get_sequence_prob(regex.sub(' ', original_message.lower()))) - - -# which letters are wrong? 
-for true, v in true_mapping.items(): - pred = best_map[v] - if true != pred: - print("true: %s, pred: %s" % (true, pred)) - - -# print the final decoded message -print("Decoded message:\n", decoded_message) - -print("\nTrue message:\n", original_message) - -plt.plot(scores) -plt.show() - - - - - From 4de23e32804c022a3c3079377320cbb8d4d0acc8 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 17 Dec 2019 13:07:19 -0500 Subject: [PATCH 165/329] update --- nlp_class3/memory_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nlp_class3/memory_network.py b/nlp_class3/memory_network.py index a73a3eed..a5603f5c 100644 --- a/nlp_class3/memory_network.py +++ b/nlp_class3/memory_network.py @@ -43,7 +43,7 @@ def tokenize(sent): >>> tokenize('Bob dropped the apple. Where is the apple?') ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?'] ''' - return [x.strip() for x in re.split('(\W+)?', sent) if x.strip()] + return [x.strip() for x in re.split('(\W+?)', sent) if x.strip()] From 3740a6fa26ca5cc5c644c51b2fb20b94eef8da8e Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 26 Dec 2019 03:02:24 -0500 Subject: [PATCH 166/329] update --- nlp_class3/attention.py | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index d7e9c809..8e7e3735 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -324,11 +324,33 @@ def stack_and_transpose(x): outputs=outputs ) + +def custom_loss(y_true, y_pred): + # both are of shape N x T x K + mask = K.cast(y_true > 0, dtype='float32') + out = mask * y_true * K.log(y_pred) + return -K.sum(out) / K.sum(mask) + + +def acc(y_true, y_pred): + # both are of shape N x T x K + targ = K.argmax(y_true, axis=-1) + pred = K.argmax(y_pred, axis=-1) + correct = K.cast(K.equal(targ, pred), dtype='float32') + + # 0 is padding, don't include those + mask = K.cast(K.greater(targ, 0), dtype='float32') + n_correct = K.sum(mask * correct) + n_total = K.sum(mask) + return n_correct / n_total + + # compile the model -model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) +model.compile(optimizer='adam', loss=custom_loss, metrics=[acc]) +# model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc']) # train the model -z = np.zeros((NUM_SAMPLES, LATENT_DIM_DECODER)) # initial [s, c] +z = np.zeros((len(encoder_inputs), LATENT_DIM_DECODER)) # initial [s, c] r = model.fit( [encoder_inputs, decoder_inputs, z, z], decoder_targets_one_hot, batch_size=BATCH_SIZE, @@ -336,6 +358,8 @@ def stack_and_transpose(x): validation_split=0.2 ) + + # plot some data plt.plot(r.history['loss'], label='loss') plt.plot(r.history['val_loss'], label='val_loss') @@ -371,6 +395,9 @@ def stack_and_transpose(x): # combine context with last word decoder_lstm_input = context_last_word_concat_layer([context, decoder_inputs_single_x]) + + + # lstm and final dense o, s, c = decoder_lstm(decoder_lstm_input, initial_state=[initial_s, initial_c]) decoder_outputs = decoder_dense(o) From ce650d61aae18d753dce14d57d09c1e80ee5a8a3 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 26 Dec 2019 03:52:44 -0500 Subject: [PATCH 167/329] update --- nlp_class3/wseq2seq.py | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index 384dbf46..e83d669a 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -23,10 +23,9 @@ # some config 
BATCH_SIZE = 64 # Batch size for training. -EPOCHS = 100 # Number of epochs to train for. +EPOCHS = 40 # Number of epochs to train for. LATENT_DIM = 256 # Latent dimensionality of the encoding space. NUM_SAMPLES = 10000 # Number of samples to train on. -MAX_SEQUENCE_LENGTH = 100 MAX_NUM_WORDS = 20000 EMBEDDING_DIM = 100 @@ -219,12 +218,37 @@ # Create the model object model = Model([encoder_inputs_placeholder, decoder_inputs_placeholder], decoder_outputs) + +def custom_loss(y_true, y_pred): + # both are of shape N x T x K + mask = K.cast(y_true > 0, dtype='float32') + out = mask * y_true * K.log(y_pred) + return -K.sum(out) / K.sum(mask) + + +def acc(y_true, y_pred): + # both are of shape N x T x K + targ = K.argmax(y_true, axis=-1) + pred = K.argmax(y_pred, axis=-1) + correct = K.cast(K.equal(targ, pred), dtype='float32') + + # 0 is padding, don't include those + mask = K.cast(K.greater(targ, 0), dtype='float32') + n_correct = K.sum(mask * correct) + n_total = K.sum(mask) + return n_correct / n_total + +model.compile(optimizer='adam', loss=custom_loss, metrics=[acc]) + # Compile the model and train it -model.compile( - optimizer='rmsprop', - loss='categorical_crossentropy', - metrics=['accuracy'] -) +# model.compile( +# optimizer='rmsprop', +# loss='categorical_crossentropy', +# metrics=['accuracy'] +# ) + + + r = model.fit( [encoder_inputs, decoder_inputs], decoder_targets_one_hot, batch_size=BATCH_SIZE, From 7390667467ec9b490589f7de60d5f5842acf1834 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 6 Jan 2020 23:08:44 -0500 Subject: [PATCH 168/329] update --- supervised_class2/rf_regression.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/supervised_class2/rf_regression.py b/supervised_class2/rf_regression.py index 2b219a34..06ee72fb 100644 --- a/supervised_class2/rf_regression.py +++ b/supervised_class2/rf_regression.py @@ -66,8 +66,7 @@ def fit_transform(self, df): def get_data(): - # regex allows arbitrary number of spaces in separator - df = pd.read_csv('../large_files/housing.data', header=None, sep=r"\s*", engine='python') + df = pd.read_csv('housing.data', header=None, delim_whitespace=True) df.columns = [ 'crim', # numerical 'zn', # numerical @@ -128,9 +127,9 @@ def get_data(): # do a quick baseline test baseline = LinearRegression() single_tree = DecisionTreeRegressor() - print("CV single tree:", cross_val_score(single_tree, Xtrain, Ytrain).mean()) - print("CV baseline:", cross_val_score(baseline, Xtrain, Ytrain).mean()) - print("CV forest:", cross_val_score(model, Xtrain, Ytrain).mean()) + print("CV single tree:", cross_val_score(single_tree, Xtrain, Ytrain, cv=5).mean()) + print("CV baseline:", cross_val_score(baseline, Xtrain, Ytrain, cv=5).mean()) + print("CV forest:", cross_val_score(model, Xtrain, Ytrain, cv=5).mean()) # test score single_tree.fit(Xtrain, Ytrain) From 9c9b90e82a8a082add2fbf392b806bb54ae6e749 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 23 Jan 2020 22:49:20 -0500 Subject: [PATCH 169/329] update readme --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 34ef4ab7..5078953a 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ Find associated tutorials at https://lazyprogrammer.me Find associated courses at https://deeplearningcourses.com +Please note that not all code from all courses will be found in this repository. Some newer code examples (e.g. everything from Tensorflow 2.0) were done in Google Colab. 
Therefore, you should check the instructions given in the lectures for the course you are taking. + Direct Course Links =================== From ef908329bb240f771447e1a1c02b62c746c363de Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 24 Jan 2020 01:05:25 -0500 Subject: [PATCH 170/329] add new course urls --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 5078953a..468fb1ba 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,13 @@ Please note that not all code from all courses will be found in this repository. Direct Course Links =================== +Tensorflow 2.0: Deep Learning and Artificial Intelligence +(Main Course - special discount link) https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 +(VIP Content) https://deeplearningcourses.com/c/deep-learning-tensorflow-2 + +Cutting-Edge AI: Deep Reinforcement Learning in Python +https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence + Recommender Systems and Deep Learning in Python https://deeplearningcourses.com/c/recommender-systems From ef3312d45ac9f09dec757ec80e4c01d262aacc9f Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 3 Feb 2020 18:00:22 -0500 Subject: [PATCH 171/329] fix done flag --- tf2.0/rl_trader.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tf2.0/rl_trader.py b/tf2.0/rl_trader.py index 694d45ee..2f98b964 100644 --- a/tf2.0/rl_trader.py +++ b/tf2.0/rl_trader.py @@ -288,11 +288,7 @@ def replay(self, batch_size=32): done = minibatch['d'] # Calculate the tentative target: Q(s',a) - target = rewards + self.gamma * np.amax(self.model.predict(next_states), axis=1) - - # The value of terminal states is zero - # so set the target to be the reward only - target[done] = rewards[done] + target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states), axis=1) # With the Keras API, the target (usually) must have the same # shape as the predictions. 
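
The single masked line above works because the replay buffer stores done as a 0/1 array
(for example, the uint8 buffer in the PyTorch port added later in this series), so multiplying
the bootstrap term by (1 - done) zeroes it on terminal transitions, which is exactly what the
removed target[done] = rewards[done] assignment did. A minimal NumPy sketch with made-up values:

import numpy as np

gamma   = 0.95
rewards = np.array([1.0, 2.0])
done    = np.array([0, 1], dtype=np.uint8)  # second transition is terminal
max_q   = np.array([5.0, 7.0])              # stand-in for max_a' Q(s', a') from the network

target = rewards + (1 - done) * gamma * max_q  # -> [5.75, 2.0]: the terminal target is just the reward
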
From aa80482a39a0ba732372e7bf3cd9b8afa46165b3 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 4 Mar 2020 12:32:45 -0500 Subject: [PATCH 172/329] update --- ann_class2/momentum.py | 14 ++++++++++---- ann_class2/rmsprop.py | 14 ++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/ann_class2/momentum.py b/ann_class2/momentum.py index 5df6fa30..4b3890e3 100644 --- a/ann_class2/momentum.py +++ b/ann_class2/momentum.py @@ -62,11 +62,17 @@ def main(): pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) # print "first batch cost:", cost(pYbatch, Ybatch) + # gradients + gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 + gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 + gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 + gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + # updates - W2 -= lr*(derivative_w2(Z, Ybatch, pYbatch) + reg*W2) - b2 -= lr*(derivative_b2(Ybatch, pYbatch) + reg*b2) - W1 -= lr*(derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1) - b1 -= lr*(derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1) + W2 -= lr*gW2 + b2 -= lr*gb2 + W1 -= lr*gW1 + b1 -= lr*gb1 if j % print_period == 0: pY, _ = forward(Xtest, W1, b1, W2, b2) diff --git a/ann_class2/rmsprop.py b/ann_class2/rmsprop.py index 43afce23..2ec4c4ac 100644 --- a/ann_class2/rmsprop.py +++ b/ann_class2/rmsprop.py @@ -48,11 +48,17 @@ def main(): pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) # print "first batch cost:", cost(pYbatch, Ybatch) + # gradients + gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 + gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 + gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 + gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + # updates - W2 -= lr*(derivative_w2(Z, Ybatch, pYbatch) + reg*W2) - b2 -= lr*(derivative_b2(Ybatch, pYbatch) + reg*b2) - W1 -= lr*(derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1) - b1 -= lr*(derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1) + W2 -= lr*gW2 + b2 -= lr*gb2 + W1 -= lr*gW1 + b1 -= lr*gb1 if j % print_period == 0: # calculate just for LL From c9cf104f5f38b0dce2a4105e89fd1a8a9038d43c Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 20 Mar 2020 18:21:20 -0400 Subject: [PATCH 173/329] add pytorch --- pytorch/.gitignore | 3 + pytorch/aapl_msi_sbux.csv | 1260 ++++++++++++++++++++++++++++++++++++ pytorch/ann_regression.py | 100 +++ pytorch/extra_reading.txt | 27 + pytorch/plot_rl_rewards.py | 16 + pytorch/rl_trader.py | 441 +++++++++++++ 6 files changed, 1847 insertions(+) create mode 100644 pytorch/.gitignore create mode 100644 pytorch/aapl_msi_sbux.csv create mode 100644 pytorch/ann_regression.py create mode 100644 pytorch/extra_reading.txt create mode 100644 pytorch/plot_rl_rewards.py create mode 100644 pytorch/rl_trader.py diff --git a/pytorch/.gitignore b/pytorch/.gitignore new file mode 100644 index 00000000..f9187508 --- /dev/null +++ b/pytorch/.gitignore @@ -0,0 +1,3 @@ +*rl_trader_models +*rl_trader_rewards +*.png diff --git a/pytorch/aapl_msi_sbux.csv b/pytorch/aapl_msi_sbux.csv new file mode 100644 index 00000000..cb98cb88 --- /dev/null +++ b/pytorch/aapl_msi_sbux.csv @@ -0,0 +1,1260 @@ +AAPL,MSI,SBUX +67.8542,60.3,28.185 +68.5614,60.9,28.07 +66.8428,60.83,28.13 +66.7156,60.81,27.915 +66.6556,61.12,27.775 +65.7371,61.43,27.17 +65.7128,62.03,27.225 +64.1214,61.26,26.655 +63.7228,60.88,26.675 +64.4014,61.9,27.085 +63.2571,60.28,26.605 +64.1385,60.63,26.64 +63.5099,62.09,27.285 +63.0571,62.21,27.425 +61.4957,62.03,27.435 +60.0071,62.5,27.85 +61.5919,62.97,28.255 +60.8088,63.11,28.55 +61.5117,62.64,29.125 +61.6742,62.75,29.335 
+62.5528,62.56,29.305 +61.2042,62.13,29.14 +61.1928,62.22,29.2925 +61.7857,62.34,28.84 +63.3799,62.07,28.83 +65.1028,61.64,28.465 +64.9271,61.67,28.415 +64.5828,62.4,28.715 +64.6756,62.43,28.525 +65.9871,63.61,28.69 +66.2256,63.29,28.345 +65.8765,63.46,28.525 +64.5828,63.56,28.455 +63.2371,64.03,28.475 +61.2728,63.7,28.435 +61.3988,63.7,29.13 +61.7128,62.8,28.85 +61.1028,62.99,29.055 +60.4571,62.67,28.9 +60.8871,63.17,29.06 +60.9971,63.64,28.705 +62.2414,64.69,28.9 +62.0471,64.63,29.2875 +61.3999,63.87,29.545 +59.9785,61.83,28.855 +60.8914,62.96,29.28 +57.5428,62.13,29.085 +56.0071,61.15,28.86 +55.7899,61.72,29.2025 +56.9528,61.78,29.32 +58.0185,61.75,29.695 +57.9231,56.02,29.915 +58.3399,56.39,30.25 +59.6007,56.8,30.0 +61.4457,57.44,30.29 +63.2542,57.2,30.42 +62.7557,56.37,30.07 +63.6457,56.89,30.19 +64.2828,57.29,30.935 +65.8156,56.95,31.24 +65.5225,56.79,31.095 +66.2628,57.0,31.205 +65.2528,56.78,31.18 +64.7099,56.48,31.5485 +64.9628,56.17,31.41 +63.4085,56.89,31.76 +61.2642,57.1,32.035 +62.0825,57.53,31.775 +61.8942,57.84,32.065 +63.2757,58.25,31.915 +62.8085,57.77,32.125 +63.0505,57.3,32.075 +63.1628,57.48,31.76 +63.5928,57.81,31.68 +63.0627,58.53,32.13 +63.5642,58.32,31.815 +64.5114,58.54,31.735 +64.2478,57.96,31.57 +64.3885,57.83,31.73 +64.1871,57.41,31.665 +63.5871,56.27,31.17 +62.6371,56.92,31.51 +63.1158,56.94,32.52 +62.6985,56.61,33.055 +62.5142,56.38,32.71 +61.7414,56.26,32.225 +62.2807,57.19,32.985 +61.4357,56.93,32.8 +61.7142,57.33,33.015 +61.6814,57.35,33.5475 +60.4285,56.78,33.205 +59.5482,55.5,32.61 +59.0714,55.82,32.345 +57.5057,55.59,32.005 +57.5185,56.35,32.37 +56.8671,57.49,32.9 +56.2542,57.84,32.845 +56.6471,57.73,32.755 +58.4599,57.98,33.12 +59.7842,57.49,33.395 +60.1142,57.26,33.65 +59.6314,57.93,33.86 +59.2928,57.86,34.145 +60.3357,58.03,34.065 +60.1042,58.43,34.05 +61.0411,59.05,34.67 +60.9299,59.54,34.86 +61.0628,59.17,34.83 +61.4564,59.32,34.76 +61.4728,59.42,34.1 +61.6797,59.36,34.24 +60.7071,59.85,34.395 +60.9014,59.87,34.51 +59.8557,59.98,33.83 +62.9299,56.04,33.305 +62.6428,54.25,34.085 +62.9985,54.26,36.68 +63.9699,54.01,36.225 +64.7599,54.35,35.965 +64.6471,54.83,35.6445 +65.2394,55.32,36.74 +66.0771,56.02,37.115 +67.0642,56.1,36.985 +66.4642,56.4,36.4 +66.4256,56.48,36.095 +65.8585,57.13,36.47 +64.9214,57.36,36.4 +66.7656,57.44,36.465 +69.9385,57.84,36.32 +71.2142,57.71,35.925 +71.1299,56.96,35.37 +71.7614,57.15,35.355 +72.5342,57.09,35.145 +71.5814,57.05,35.33 +71.7656,56.06,35.3565 +71.8514,56.33,35.95 +71.5742,56.74,35.985 +71.8528,56.55,35.94 +69.7985,56.12,35.08 +70.1279,56.39,35.48 +70.2428,56.19,35.59 +69.6022,56.01,35.26 +69.7971,56.28,35.8 +71.2415,56.08,36.07 +70.7528,56.17,36.025 +71.1742,56.47,35.785 +72.3099,57.59,36.22 +70.6628,57.37,37.1075 +66.8156,57.25,37.695 +67.5271,57.5,37.835 +66.4142,57.46,37.785 +64.3028,57.81,37.62 +65.0456,58.28,38.02 +66.3828,59.26,38.665 +67.4714,59.69,38.175 +66.7728,60.39,38.06 +70.0914,60.37,37.68 +69.8714,59.99,38.275 +68.7899,59.85,38.17 +69.4599,59.87,38.59 +68.9642,59.75,38.665 +68.1071,59.38,38.485 +69.7085,60.89,38.58 +69.9371,60.7,38.595 +69.0585,60.56,38.435 +69.0042,61.14,38.7 +69.6785,60.89,38.4305 +68.7056,59.62,37.765 +69.5125,59.39,37.63 +69.9482,60.61,38.56 +70.4016,60.52,38.91 +70.8628,61.03,39.05 +71.2399,60.49,38.355 +71.5876,60.71,39.02 +72.0714,60.92,39.3675 +72.6985,60.81,39.655 +74.4802,61.18,39.73 +74.2667,60.43,40.45 +74.9942,62.4,40.025 +75.9871,62.51,39.525 +75.1368,62.99,39.98 +75.6965,62.44,39.355 +73.8111,62.73,39.81 +74.9851,62.25,40.415 +74.6716,62.52,40.525 
+74.2899,62.39,40.185 +75.2499,62.71,40.185 +75.0641,62.68,40.995 +74.4171,62.65,40.565 +73.2131,62.49,39.535 +74.3656,63.12,40.6 +74.1496,63.51,40.495 +74.2871,64.24,40.3075 +74.3762,64.45,40.7305 +75.4514,64.58,40.57 +74.9986,65.57,40.595 +74.0898,65.42,40.27 +74.2214,64.61,39.96 +73.5714,64.58,39.845 +74.4479,65.41,40.765 +74.2571,65.88,40.675 +74.8199,65.79,40.355 +76.1999,65.57,40.755 +77.9942,65.5,40.81 +79.4385,65.88,40.73 +78.7471,65.66,40.535 +80.9031,65.79,40.275 +80.7142,64.93,39.75 +81.1286,65.23,39.86 +80.0028,66.18,39.97 +80.9185,65.79,39.865 +80.7928,65.41,38.69 +80.1942,64.6,38.2 +80.0771,64.86,38.24 +79.2042,65.05,38.175 +79.6428,65.36,38.23 +79.2842,65.52,38.045 +78.6813,66.16,38.84 +77.7799,65.85,38.575 +78.4314,65.61,38.83 +81.4413,66.78,39.16 +81.0956,67.1,39.285 +80.5571,67.18,39.44 +80.0128,67.33,39.285 +79.2171,67.25,39.275 +80.1456,67.5,39.195 +79.0185,66.33,38.585 +77.2828,66.2,38.475 +77.7042,65.92,38.085 +77.1481,66.19,38.605 +77.6371,65.99,39.015 +76.6455,66.5,38.8 +76.1342,66.15,38.835 +76.5328,65.49,37.56 +78.0556,66.35,37.73 +79.6228,65.62,38.095 +79.1785,65.81,37.645 +77.2385,66.1,37.45 +78.4385,67.11,36.825 +78.7871,64.51,36.8 +79.4542,65.34,36.695 +78.0099,64.42,37.49 +78.6428,64.43,37.105 +72.3571,64.34,36.945 +71.5356,63.98,35.78 +71.3974,64.91,35.955 +71.5142,63.8,35.56 +71.6471,62.72,34.485 +72.6842,62.99,35.325 +73.2271,62.89,35.245 +73.2156,63.4,36.18 +74.2399,64.6,37.0175 +75.5699,65.08,37.4 +76.5656,65.03,37.25 +76.5599,65.78,36.955 +77.7756,65.67,37.345 +77.7128,65.61,37.515 +77.9985,65.78,36.985 +76.7671,64.93,36.66 +75.8785,65.22,36.775 +75.0356,65.02,36.28 +75.3642,64.96,36.28 +74.5799,65.1,35.275 +73.9071,65.45,35.89 +75.3814,65.9,36.095 +75.1771,66.2,35.48 +75.3942,65.98,35.235 +75.8914,66.76,35.83 +76.0514,66.33,35.65 +75.8214,66.57,36.345 +75.7771,66.64,36.535 +75.8456,66.43,36.78 +76.5842,66.08,37.515 +76.6585,65.02,37.815 +75.8071,64.21,37.215 +74.9556,63.67,37.135 +75.2485,65.08,37.09 +75.9142,65.72,37.3 +75.8942,65.7,37.955 +75.5285,66.66,38.4775 +76.1242,66.81,38.355 +77.0271,66.05,37.885 +77.8556,66.18,37.305 +77.1114,65.16,36.77 +76.7799,64.36,36.7 +76.6942,64.3,36.85 +76.6771,64.29,36.69 +77.3785,64.91,37.005 +77.5071,65.1,36.835 +76.9699,65.09,36.545 +75.9742,64.26,35.775 +74.7814,64.43,35.215 +74.7771,64.95,35.74 +75.7599,65.26,36.24 +74.7828,63.99,35.11 +74.2299,63.39,34.365 +74.5256,63.78,34.655 +73.9942,63.37,34.445 +74.1442,63.23,35.395 +74.9914,63.15,35.075 +75.8814,62.51,35.24 +75.9569,63.27,35.5745 +74.9642,63.29,35.195 +81.1099,63.0,35.545 +81.7056,62.5,35.725 +84.8699,62.64,35.465 +84.6185,63.43,35.32 +84.2985,63.58,35.31 +84.4971,62.65,35.56 +84.6542,65.51,35.3 +85.8513,66.15,35.46 +84.9156,66.4,34.79 +84.6185,67.14,34.87 +83.9985,67.38,34.79 +83.6488,67.26,35.145 +84.6899,67.8,35.575 +84.8228,67.75,35.58 +84.8385,67.2,35.085 +84.1171,66.34,34.925 +85.3585,66.3,35.47 +86.3699,66.88,35.51 +86.3871,66.52,35.115 +86.6156,66.89,35.2 +86.7528,66.63,35.7 +87.7328,67.0,35.99 +89.3756,67.02,36.83 +89.1442,66.93,36.635 +90.7685,66.91,36.555 +90.4285,67.42,36.62 +89.8071,67.4,36.925 +91.0771,66.86,37.09 +92.1171,67.23,37.335 +92.4785,67.17,37.36 +92.2242,67.66,37.665 +93.7,67.67,37.59 +94.25,67.7,37.3 +93.86,66.93,37.4 +92.29,66.46,36.98 +91.28,66.78,37.345 +92.2,66.72,37.545 +92.08,66.64,37.655 +92.18,66.62,37.78 +91.86,67.06,38.615 +90.91,67.07,38.3 +90.83,67.1,38.365 +90.28,66.73,38.715 +90.36,66.55,39.06 +90.9,66.56,39.03 +91.98,66.78,38.97 +92.93,66.57,38.69 +93.52,66.96,39.04 +93.48,67.02,39.095 +94.03,67.41,39.53 
+95.96799999999999,67.24,39.345 +95.35,66.27,39.28 +95.39,66.58,39.725 +95.035,66.45,39.425 +95.22,66.0,39.3 +96.45,66.08,39.28 +95.32,65.49,39.445 +94.78,65.67,39.365 +93.0899,64.94,38.62 +94.43,65.49,38.97 +93.939,65.74,38.805 +94.72,66.05,39.37 +97.19,65.77,39.57 +97.03,65.61,40.225 +97.671,65.0,39.37 +99.02,65.21,39.18 +98.38,64.74,39.325 +98.15,64.83,39.45 +95.6,63.68,38.84 +96.13,64.11,38.49 +95.59,64.11,38.765 +95.12,61.39,38.395 +94.96,61.21,38.565 +94.48,61.25,38.355 +94.74,62.19,38.81 +95.99,61.73,38.935 +95.97,61.64,38.91 +97.24,62.03,38.62 +97.5,61.52,38.31 +97.98,61.0,38.455 +99.16,60.81,38.795 +100.53,61.37,39.06 +100.57,61.64,39.015 +100.58,61.7,38.735 +101.32,61.23,38.64 +101.54,61.02,38.985 +100.889,60.3,38.895 +102.13,59.68,38.96 +102.25,59.37,38.905 +102.5,59.4,38.905 +103.3,59.01,38.74 +98.94,58.94,38.395 +98.12,58.98,38.58 +98.97,58.89,38.975 +98.36,61.02,38.835 +97.99,61.08,38.56 +101.0,61.22,38.605 +101.43,61.22,38.06 +101.66,61.54,37.735 +101.63,61.42,37.46 +100.86,61.69,37.545 +101.58,61.91,37.67 +101.79,62.04,37.865 +100.96,61.88,38.035 +101.06,61.68,37.3 +102.64,61.57,36.9775 +101.75,61.8,37.66 +97.87,62.24,37.06 +100.75,63.42,37.585 +100.11,63.18,37.635 +100.75,63.28,37.73 +99.18,62.34,37.305 +99.9,61.03,37.225 +99.62,61.3,37.945 +99.62,61.5,37.5725 +98.75,60.47,37.025 +100.8,61.58,37.63 +101.02,60.46,37.24 +100.73,59.05,37.23 +99.81,58.5,36.095 +98.75,58.73,36.37 +97.54,59.32,36.19 +96.26,59.18,36.32 +97.67,60.79,36.77 +99.76,61.25,37.35 +102.47,62.39,37.18 +102.99,61.63,37.3 +104.83,62.25,37.42 +105.22,62.57,37.905 +105.11,62.8,37.985 +106.74,64.06,38.525 +107.34,63.94,38.27 +106.98,63.7,38.66 +108.0,64.5,37.78 +109.4,64.68,38.05 +108.6,66.76,38.355 +108.86,64.46,38.33 +108.7,63.42,38.725 +109.01,64.14,38.895 +108.83,63.94,38.825 +109.7,63.55,38.865 +111.25,63.7,38.925 +112.82,64.43,38.945 +114.18,65.25,39.06 +113.99,65.4,38.915 +115.47,66.0,38.785 +114.67,65.94,38.91 +116.31,65.66,39.1 +116.47,65.27,39.88 +118.625,65.81,40.26 +117.6,65.6,40.105 +119.0,65.56,39.85 +118.93,65.72,40.605 +115.07,65.44,40.425 +114.63,65.51,40.185 +115.93,65.32,40.235 +115.49,65.2,40.655 +115.0,65.0,41.785 +112.4,65.27,41.9 +114.12,65.29,41.515 +111.95,63.52,41.33 +111.62,63.29,41.56 +109.73,62.31,41.625 +108.225,61.91,40.445 +106.745,61.73,39.565 +109.41,63.99,40.2175 +112.65,65.11,40.015 +111.78,65.5,39.72 +112.94,66.53,40.27 +112.54,66.93,40.715 +112.01,67.34,40.635 +113.99,67.49,40.915 +113.91,67.87,41.19 +112.52,67.53,40.895 +110.38,67.08,41.025 +109.33,66.51,40.72 +106.25,65.06,39.94 +106.26,64.51,39.615 +107.75,64.43,40.59 +111.89,65.43,41.245 +112.01,65.11,39.895 +109.25,64.35,40.115 +110.22,64.11,40.435 +109.8,63.76,40.21 +106.82,63.41,39.79 +105.99,64.05,40.305 +108.72,64.02,40.6125 +109.55,64.31,40.645 +112.4,65.36,41.37 +112.98,65.48,44.11 +113.1,65.71,44.06 +109.14,64.94,44.17 +115.31,63.84,43.7825 +118.9,63.83,44.525 +117.16,62.41,43.765 +118.63,62.81,43.995 +118.65,64.01,44.245 +119.56,63.94,44.35 +119.94,64.0,44.82 +118.93,64.66,44.5 +119.72,67.78,44.41 +122.02,68.22,45.59 +124.88,68.57,45.395 +126.46,70.0,45.9125 +127.08,69.91,45.79 +127.83,69.79,46.015 +128.715,69.12,46.5 +128.45,69.03,46.585 +129.495,69.83,46.755 +133.0,68.63,46.79 +132.17,68.53,46.725 +128.79,68.02,47.13 +130.415,68.47,47.275 +128.46,67.94,46.7425 +129.09,68.89,47.1125 +129.36,68.14,47.0 +128.54,67.64,46.53 +126.41,67.93,46.815 +126.6,66.82,46.1075 +127.14,66.57,46.52 +124.51,65.33,46.09 +122.24,65.31,45.71 +124.45,64.96,46.69 +123.59,64.8,46.645 +124.95,65.86,47.0225 +127.04,65.32,47.1925 
+128.47,66.65,47.92 +127.495,66.34,48.88 +125.9,66.83,48.73 +127.21,66.52,48.685 +126.69,66.23,48.9575 +123.38,65.35,47.885 +124.24,65.42,47.54 +123.25,65.38,47.535 +126.37,66.39,47.99 +124.43,66.67,47.35 +124.25,66.67,46.51 +125.32,62.51,47.195 +127.35,61.48,47.26 +126.01,61.99,47.035 +125.6,62.42,47.615 +126.56,62.32,47.96 +127.1,62.53,48.17 +126.85,61.97,48.5 +126.3,61.91,48.3 +126.78,61.82,48.14 +126.17,61.86,48.245 +124.75,60.68,47.62 +127.6,61.16,47.97 +126.91,61.43,48.37 +128.62,61.59,48.335 +129.67,60.84,49.43 +130.28,60.57,51.84 +132.65,60.98,50.87 +130.56,60.69,50.61 +128.64,59.74,50.65 +125.15,59.75,49.58 +128.95,60.28,50.29 +128.7,60.68,50.445 +125.8,58.59,49.405 +125.01,58.75,48.93 +125.26,60.01,49.35 +127.62,60.59,49.78 +126.32,59.8,49.5 +125.865,59.42,49.71 +126.01,59.25,49.59 +128.95,59.79,50.555 +128.77,59.3,50.8 +130.19,60.12,51.18 +130.07,59.8,51.42 +130.06,59.8,51.03 +131.39,59.79,51.33 +132.54,59.66,51.48 +129.62,59.11,50.84 +132.045,59.06,51.59 +131.78,59.63,51.81 +130.28,59.0,51.96 +130.535,59.65,52.22 +129.96,59.19,51.73 +130.12,59.48,52.12 +129.36,58.8,51.72 +128.65,58.61,52.19 +127.8,58.08,51.53 +127.42,57.9,51.54 +128.88,58.49,52.69 +128.59,58.55,52.49 +127.17,57.65,52.63 +126.92,57.95,52.27 +127.6,58.18,52.965 +127.3,57.97,53.24 +127.88,58.39,54.11 +126.6,58.05,53.93 +127.61,59.22,53.9 +127.03,59.12,54.115 +128.11,58.29,53.71 +127.5,58.35,54.07 +126.75,58.38,54.62 +124.53,57.14,53.55 +125.425,57.34,53.615 +126.6,57.6,53.89 +126.44,57.51,54.24 +126.0,57.22,54.305 +125.69,57.49,54.375 +122.57,56.79,53.39 +120.07,56.94,54.05 +123.28,57.48,54.57 +125.66,58.43,55.7 +125.61,58.6,55.75 +126.82,58.89,55.34 +128.51,59.29,55.74 +129.62,58.85,55.69 +132.07,59.4,56.21 +130.75,59.57,56.2 +125.22,59.35,56.69 +125.16,58.85,56.56 +124.5,59.5,57.29 +122.77,58.71,56.98 +123.38,59.11,57.14 +122.99,59.58,57.51 +122.37,59.86,58.06 +121.3,60.16,57.93 +118.44,59.76,58.19 +114.64,60.22,58.7 +115.4,64.04,59.01 +115.13,63.8,57.23 +115.52,64.19,57.2 +119.72,63.99,56.27 +113.49,63.35,56.35 +115.24,64.6,56.38 +115.15,64.34,56.85 +115.96,64.98,57.1 +117.16,65.27,57.74 +116.5,65.77,57.83 +115.01,65.35,57.59 +112.65,63.89,55.81 +105.76,62.45,52.84 +103.12,60.79,50.34 +103.74,60.44,51.09 +109.69,63.14,53.96 +112.92,64.29,55.95 +113.29,64.55,55.63 +112.76,64.82,54.71 +107.72,63.85,53.5 +112.34,64.72,55.26 +110.37,65.11,54.69 +109.27,66.31,54.28 +112.31,69.61,55.21 +110.15,68.3,54.69 +112.57,69.09,55.37 +114.21,67.08,56.53 +115.31,66.84,56.29 +116.28,67.15,56.91 +116.41,67.47,57.26 +113.92,67.03,57.28 +113.45,67.09,56.84 +115.21,67.05,57.54 +113.4,66.58,57.12 +114.32,67.8,57.79 +115.0,67.91,58.37 +114.71,69.2,57.99 +112.44,67.93,55.77 +109.06,67.45,55.72 +110.3,68.38,56.84 +109.58,67.76,57.48 +110.38,68.4,58.08 +110.78,69.75,59.04 +111.31,69.19,58.69 +110.78,69.79,58.78 +109.5,69.5,59.46 +112.12,68.78,60.07 +111.6,69.43,60.54 +111.79,69.04,60.16 +110.21,68.7,58.82 +111.86,69.27,59.69 +111.04,69.26,59.93 +111.73,69.03,60.97 +113.77,69.48,60.88 +113.76,69.47,60.53 +115.5,70.48,61.49 +119.08,70.48,62.61 +115.28,70.05,63.43 +114.55,69.96,62.71 +119.27,70.37,63.51 +120.53,70.13,62.5 +119.5,69.97,62.57 +121.18,70.73,62.24 +122.57,71.36,62.8 +122.0,65.24,61.96 +120.92,67.4,62.28 +121.06,68.01,61.97 +120.57,68.2,61.34 +116.77,68.34,62.18 +116.11,70.02,61.87 +115.72,69.44,61.07 +112.34,69.03,59.74 +114.175,70.02,60.68 +113.69,71.05,60.55 +117.29,71.98,61.8 +118.78,72.45,61.46 +119.3,72.19,61.99 +117.75,72.24,62.64 +118.88,71.96,61.96 +118.03,71.83,62.19 +117.81,72.02,62.18 +118.3,71.78,61.39 
+117.34,72.05,61.37 +116.28,71.89,61.22 +115.2,71.08,59.55 +119.03,72.11,61.75 +118.28,70.38,61.89 +118.23,69.75,62.16 +115.62,69.31,61.18 +116.17,69.37,61.87 +113.18,68.61,59.82 +112.48,68.14,59.92 +110.49,69.13,59.98 +111.34,69.52,60.35 +108.98,68.56,59.515 +106.03,67.58,58.62 +107.33,68.03,59.54 +107.23,68.87,59.99 +108.61,69.21,60.34 +108.03,69.06,60.32 +106.82,69.18,60.19 +108.74,69.64,61.13 +107.32,69.3,60.82 +105.26,68.45,60.03 +105.35,67.13,58.26 +102.71,66.39,58.65 +100.7,65.43,58.13 +96.45,64.11,56.69 +96.96,64.25,56.63 +98.53,64.37,57.82 +99.96,64.91,59.46 +97.39,63.37,57.87 +99.52,63.11,58.98 +97.13,61.59,58.0 +96.66,61.13,58.55 +96.79,60.36,56.92 +96.3,60.82,59.03 +101.42,62.04,59.17 +99.44,62.42,57.71 +99.99,63.16,58.61 +93.42,64.8,57.63 +94.09,64.74,59.285 +97.34,66.77,60.77 +96.43,66.85,61.4 +94.48,64.32,60.695 +96.35,64.88,59.53 +96.6,64.25,58.29 +94.02,62.82,54.49 +95.01,62.09,54.14 +94.99,62.24,54.42 +94.27,60.97,55.14 +93.7,60.52,54.92 +93.99,61.78,55.86 +96.64,63.42,56.41 +98.12,65.05,57.63 +96.26,64.78,56.96 +96.04,66.0,57.67 +96.88,66.75,58.87 +94.69,70.78,58.46 +96.1,72.84,58.11 +96.76,74.06,58.75 +96.91,74.86,58.34 +96.69,73.49,58.21 +100.53,71.19,60.04 +100.75,71.28,59.56 +101.5,71.25,59.04 +103.01,70.95,58.7 +101.87,71.01,58.0 +101.03,71.1,57.6 +101.12,71.48,57.07 +101.17,71.22,57.52 +102.26,71.2,57.59 +102.52,71.83,58.65 +104.58,71.97,59.08 +105.97,72.24,59.67 +105.8,72.83,59.55 +105.92,72.59,59.7 +105.91,73.12,59.1 +106.72,73.71,59.38 +106.13,73.15,58.83 +105.67,72.59,58.36 +105.19,73.37,58.96 +107.68,74.09,59.55 +109.56,74.89,60.01 +108.99,75.7,59.7 +109.99,76.11,61.02 +111.12,76.32,60.25 +109.81,75.71,60.04 +110.96,76.09,60.83 +108.54,74.99,61.17 +108.66,75.24,61.04 +109.02,74.88,60.9 +110.44,75.04,59.5 +112.04,75.37,60.21 +112.1,75.31,60.13 +109.85,75.64,60.51 +107.48,75.69,60.89 +106.91,75.97,60.9 +107.13,75.55,60.9 +105.97,74.99,60.64 +105.68,75.56,57.68 +105.08,75.51,57.77 +104.35,75.9,57.72 +97.82,76.04,56.9 +94.83,75.34,56.42 +93.74,75.19,56.23 +93.64,76.0,57.36 +95.18,74.96,56.25 +94.19,74.22,56.39 +93.24,74.25,56.25 +92.72,70.54,56.31 +92.79,70.82,56.64 +93.42,71.05,57.49 +92.51,70.07,56.23 +90.34,71.11,56.3 +90.52,70.62,55.82 +93.88,70.83,55.53 +93.49,69.89,54.88 +94.56,69.46,54.8 +94.2,68.72,54.55 +95.22,68.75,54.62 +96.43,68.78,54.6 +97.9,69.68,55.44 +99.62,69.35,55.15 +100.41,69.4,55.29 +100.35,69.5,55.15 +99.86,69.27,54.89 +98.46,69.06,54.82 +97.72,68.8,54.62 +97.92,68.47,54.61 +98.63,68.77,55.59 +99.03,68.16,55.3 +98.94,69.05,55.22 +99.65,68.56,55.58 +98.83,67.45,54.865 +97.34,66.82,55.04 +97.46,67.24,55.57 +97.14,67.54,55.35 +97.55,67.8,55.53 +95.33,67.33,55.31 +95.1,68.35,55.38 +95.91,67.81,55.81 +95.55,67.43,55.61 +96.1,68.01,56.13 +93.4,64.73,54.68 +92.04,63.08,53.69 +93.59,63.69,54.85 +94.4,64.55,56.74 +95.6,65.97,57.12 +95.89,66.01,56.99 +94.99,64.77,56.77 +95.53,65.3,56.75 +95.94,65.05,56.91 +96.68,66.38,56.51 +96.98,66.62,56.32 +97.42,67.4,57.48 +96.87,67.46,56.48 +98.79,67.58,57.59 +98.78,67.4,57.41 +99.83,67.55,56.92 +99.87,67.5,56.76 +99.96,67.93,57.54 +99.43,67.55,57.6 +98.66,68.25,57.9 +97.34,68.09,57.95 +96.67,68.42,58.31 +102.95,69.26,57.85 +104.34,69.58,58.21 +104.21,69.38,58.05 +106.05,69.63,57.63 +104.48,68.84,56.73 +105.79,69.29,55.94 +105.87,70.24,55.42 +107.48,73.5,55.9 +108.37,73.93,55.36 +108.81,74.28,55.2 +108.0,74.28,55.62 +107.93,75.52,55.47 +108.18,74.54,55.47 +109.48,75.44,55.25 +109.38,75.58,55.37 +109.22,75.68,55.8 +109.08,75.99,55.53 +109.36,76.34,54.94 +108.51,76.49,55.85 +108.85,76.99,56.4 +108.03,77.12,57.09 
+107.57,77.18,57.29 +106.94,77.2,57.29 +106.82,77.29,56.8 +106.0,77.51,56.4 +106.1,76.99,56.23 +106.73,76.8,56.31 +107.73,77.95,56.18 +107.7,78.32,56.02 +108.36,78.08,56.32 +105.52,77.37,55.3 +103.13,76.65,54.35 +105.44,77.23,54.71 +107.95,76.09,53.98 +111.77,75.47,53.9 +115.57,76.04,54.11 +114.92,75.63,53.74 +113.58,75.76,53.01 +113.57,75.21,53.3 +113.55,75.73,53.98 +114.62,76.19,54.39 +112.71,76.11,54.43 +112.88,75.95,54.04 +113.09,76.32,54.19 +113.95,76.79,53.98 +112.18,77.21,53.45 +113.05,76.28,54.14 +112.52,75.25,53.84 +113.0,74.42,53.53 +113.05,74.35,53.35 +113.89,74.64,53.14 +114.06,74.48,53.46 +116.05,74.67,53.3 +116.3,73.5,52.92 +117.34,73.76,53.16 +116.98,73.06,52.95 +117.63,73.58,53.08 +117.55,73.13,52.76 +117.47,73.8,52.61 +117.12,73.8,53.15 +117.06,73.57,53.59 +116.6,73.62,53.63 +117.65,74.49,54.18 +118.25,74.16,53.67 +115.59,73.58,53.63 +114.48,73.48,53.59 +113.72,72.83,53.53 +113.54,72.58,53.07 +111.49,72.32,52.5 +111.59,71.57,52.98 +109.83,71.29,51.77 +108.84,75.9,52.75 +110.41,77.71,54.49 +111.06,78.56,54.62 +110.88,78.96,54.58 +107.79,79.19,53.57 +108.43,80.38,53.93 +105.71,80.6,54.22 +107.11,81.8,54.59 +109.99,80.51,55.44 +109.95,80.35,55.85 +110.06,79.98,55.77 +111.73,79.83,56.1 +111.8,80.31,57.12 +111.23,80.26,57.59 +111.79,80.98,57.43 +111.57,80.86,57.59 +111.46,81.11,58.17 +110.52,80.25,57.97 +109.49,79.19,58.51 +109.9,79.5,57.21 +109.11,80.92,57.5 +109.95,82.22,57.44 +111.03,83.27,58.76 +112.12,83.3,58.65 +113.95,82.79,58.75 +113.3,82.6,58.77 +115.19,83.24,59.31 +115.19,82.9,58.75 +115.82,83.46,57.71 +115.97,83.4,57.66 +116.64,83.93,57.65 +116.95,83.76,57.7 +117.06,84.0,57.44 +116.29,83.72,57.11 +116.52,83.41,57.01 +117.26,83.52,56.86 +116.76,82.86,56.35 +116.73,82.87,56.32 +115.82,82.89,55.52 +116.15,83.6,55.35 +116.02,83.49,55.99 +116.61,82.64,56.46 +117.91,82.89,57.13 +118.99,83.02,58.2 +119.11,82.63,57.88 +119.75,82.88,58.1 +119.25,82.18,58.03 +119.04,82.27,57.85 +120.0,80.73,58.0 +119.99,81.65,58.45 +119.78,81.86,57.89 +120.0,82.36,57.66 +120.08,82.44,57.76 +119.97,84.35,58.44 +121.88,85.29,58.7 +121.94,83.36,58.46 +121.95,82.98,56.12 +121.63,81.7,55.9 +121.35,80.71,55.22 +128.75,80.03,53.9 +128.53,81.0,53.87 +129.08,81.6,55.06 +130.29,81.73,55.73 +131.53,77.34,55.24 +132.04,78.25,55.22 +132.42,77.81,55.81 +132.12,78.37,56.22 +133.29,78.48,56.11 +135.02,78.68,56.58 +135.51,79.4,56.86 +135.345,78.66,56.73 +135.72,79.31,57.35 +136.7,80.15,57.54 +137.11,79.65,57.57 +136.53,79.36,57.64 +136.66,80.27,57.48 +136.93,79.28,56.78 +136.99,78.97,56.87 +139.79,79.98,57.14 +138.96,80.02,57.12 +139.78,80.55,57.1 +139.34,79.97,56.68 +139.52,79.66,56.2 +139.0,80.2,55.74 +138.68,81.37,55.19 +139.14,82.1,54.53 +139.2,81.65,54.63 +138.99,83.36,54.27 +140.46,85.24,54.54 +140.69,85.15,54.8 +139.99,84.72,55.78 +141.46,84.3,55.81 +139.84,83.76,55.54 +141.42,83.59,55.89 +140.92,83.74,55.85 +140.64,83.67,56.81 +140.88,84.0,57.23 +143.8,84.0,57.35 +144.12,84.13,57.54 +143.93,84.87,58.16 +143.66,86.22,58.39 +143.7,84.83,58.44 +144.77,84.52,58.32 +144.02,83.83,58.22 +143.66,84.2,57.92 +143.34,84.25,58.02 +143.17,83.71,57.95 +141.63,83.45,57.88 +141.8,82.84,57.58 +141.05,82.34,57.51 +141.83,83.08,58.08 +141.2,82.64,58.35 +140.68,83.37,59.04 +142.44,84.1,60.08 +142.27,83.72,60.61 +143.64,84.72,61.11 +144.53,85.39,60.96 +143.68,85.38,61.56 +143.79,86.07,61.3 +143.65,85.97,60.06 +146.58,86.16,60.18 +147.51,85.92,60.5 +147.06,86.37,60.59 +146.53,86.1,60.83 +148.96,84.44,60.95 +153.01,83.59,60.94 +153.99,84.77,60.98 +153.26,85.77,60.66 +153.95,85.36,60.27 +156.1,84.21,59.93 +155.7,84.48,60.45 
+155.47,83.7,59.98 +150.25,81.85,59.73 +152.54,80.83,59.82 +153.06,80.83,61.36 +153.99,82.93,61.23 +153.8,82.11,61.15 +153.34,82.22,61.89 +153.87,82.27,62.9 +153.61,81.86,63.3 +153.67,82.83,63.26 +152.76,83.57,63.61 +153.18,85.64,63.75 +155.45,86.62,64.57 +153.93,87.31,64.27 +154.45,87.46,64.16 +155.37,86.18,63.5 +154.99,86.65,62.24 +148.98,86.17,62.19 +145.42,86.11,61.29 +146.59,86.04,60.92 +145.16,84.87,60.27 +144.29,84.45,60.09 +142.27,84.72,60.14 +146.34,86.2,60.9 +145.01,85.74,59.86 +145.87,86.24,59.96 +145.63,87.36,59.51 +146.28,88.64,59.81 +145.82,88.42,59.64 +143.73,87.72,58.96 +145.83,88.13,59.18 +143.68,86.8,58.36 +144.02,86.74,58.31 +143.5,86.68,58.25 +144.09,86.83,57.94 +142.73,85.96,57.6 +144.18,87.31,58.04 +145.06,87.23,57.81 +145.53,87.65,57.9 +145.74,88.65,58.54 +147.77,88.33,58.38 +149.04,88.61,58.76 +149.56,88.28,58.33 +150.08,88.45,58.21 +151.02,89.78,58.11 +150.34,89.96,58.03 +150.27,90.52,57.98 +152.09,90.67,58.02 +152.74,91.39,58.55 +153.46,91.84,57.94 +150.56,92.21,59.5 +149.5,91.01,54.0 +148.73,90.68,53.98 +158.59,90.43,54.73 +157.14,90.43,55.43 +155.57,90.4,55.68 +156.39,90.37,55.44 +158.81,89.2,55.63 +160.08,88.58,54.52 +161.06,88.51,53.74 +155.32,86.99,53.07 +157.48,87.48,53.18 +159.85,88.6,53.22 +161.6,87.92,53.15 +160.95,88.19,53.5 +157.86,87.13,53.04 +157.5,87.37,52.7 +157.21,87.2,53.15 +159.78,86.41,54.45 +159.98,86.21,54.08 +159.27,86.51,53.94 +159.86,86.88,54.36 +161.47,87.3,54.4 +162.91,86.94,54.1 +163.35,87.66,54.52 +164.0,88.12,54.86 +164.05,87.87,54.93 +162.08,86.66,55.13 +161.91,85.72,54.31 +161.26,86.35,53.47 +158.63,85.12,53.49 +161.5,87.01,54.02 +160.86,87.74,53.54 +159.65,85.97,54.29 +158.28,84.58,54.53 +159.88,85.48,54.67 +158.67,85.48,54.69 +158.73,85.84,54.62 +156.07,85.65,55.15 +153.39,84.99,55.01 +151.89,84.29,55.09 +150.55,83.5,54.95 +153.14,83.02,55.13 +154.23,84.1,54.99 +153.28,83.69,54.5 +154.12,84.87,53.71 +153.81,85.84,53.81 +154.48,85.69,53.99 +153.48,85.64,53.93 +155.39,86.0,54.6 +155.3,89.44,55.17 +155.84,89.1,55.02 +155.9,89.08,55.42 +156.55,89.26,55.64 +156.0,89.8,55.97 +156.99,89.93,55.72 +159.88,89.36,54.91 +160.47,88.88,54.51 +159.76,89.09,55.21 +155.98,89.65,55.4 +156.25,90.0,54.57 +156.17,89.94,54.27 +157.1,90.23,54.28 +156.41,90.04,54.16 +157.41,90.28,54.91 +163.05,91.19,54.88 +166.72,90.37,55.17 +169.04,90.54,54.84 +166.89,90.56,55.13 +168.11,90.02,54.87 +172.5,94.25,56.03 +174.25,92.43,56.57 +174.81,92.11,57.22 +176.24,92.66,57.91 +175.88,91.61,57.36 +174.67,91.07,57.04 +173.97,91.37,56.64 +171.34,91.02,56.93 +169.08,90.39,56.7 +171.1,90.97,57.24 +170.15,90.95,56.93 +169.98,92.33,56.81 +173.14,92.45,57.26 +174.96,91.83,57.14 +174.97,92.36,56.8 +174.09,92.88,55.91 +173.07,94.53,56.66 +169.48,94.17,57.51 +171.85,94.11,57.82 +171.05,93.03,57.32 +169.8,93.63,58.76 +169.64,90.66,59.34 +169.01,91.29,59.28 +169.32,92.8,59.14 +169.37,92.52,58.61 +172.67,92.33,59.07 +171.7,93.37,59.27 +172.27,93.94,59.49 +172.22,92.2,59.7 +173.97,93.15,58.29 +176.42,94.49,58.03 +174.54,93.28,58.01 +174.35,92.1,57.73 +175.01,91.62,57.58 +175.01,90.76,57.3 +170.57,90.67,57.14 +170.6,90.8,57.27 +171.08,90.57,57.81 +169.23,90.34,57.43 +172.26,90.55,57.63 +172.23,89.91,58.71 +173.03,90.66,58.93 +175.0,91.88,59.61 +174.35,92.82,59.31 +174.33,92.12,59.18 +174.29,92.38,59.82 +175.28,93.55,60.0 +177.09,96.57,60.4 +176.19,95.86,60.56 +179.1,97.28,60.66 +179.26,97.5,61.09 +178.46,97.8,61.26 +177.0,97.33,61.41 +177.04,96.76,61.69 +174.22,95.84,60.83 +171.11,97.68,60.55 +171.51,99.0,57.99 +167.96,99.18,57.02 +166.97,99.8,57.19 +167.43,99.46,56.81 
+167.78,99.12,56.0 +160.5,103.87,55.77 +156.49,101.06,54.69 +163.03,102.76,55.61 +159.54,102.63,54.46 diff --git a/pytorch/ann_regression.py b/pytorch/ann_regression.py new file mode 100644 index 00000000..65d66250 --- /dev/null +++ b/pytorch/ann_regression.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +"""PyTorch Regression.ipynb + +Automatically generated by Colaboratory. + +Original file is located at + https://colab.research.google.com/drive/1pEjzEmbnu2wXAhIaBS8PSpi-0cWtR6ov +""" + +import torch +import torch.nn as nn +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +# Make the dataset +N = 1000 +X = np.random.random((N, 2)) * 6 - 3 # uniformly distributed between (-3, +3) +Y = np.cos(2*X[:,0]) + np.cos(3*X[:,1]) + +# Plot it +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], Y) +plt.show() + +# Build the model +model = nn.Sequential( + nn.Linear(2, 128), + nn.ReLU(), + nn.Linear(128, 1) +) + +# Loss and optimizer +criterion = nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + +# Train the model +def full_gd(model, criterion, optimizer, X_train, y_train, epochs=1000): + # Stuff to store + train_losses = np.zeros(epochs) + + for it in range(epochs): + # zero the parameter gradients + optimizer.zero_grad() + + # Forward pass + outputs = model(X_train) + loss = criterion(outputs, y_train) + + # Backward and optimize + loss.backward() + optimizer.step() + + # Save losses + train_losses[it] = loss.item() + + if (it + 1) % 50 == 0: + print(f'Epoch {it+1}/{epochs}, Train Loss: {loss.item():.4f}') + + return train_losses + +X_train = torch.from_numpy(X.astype(np.float32)) +y_train = torch.from_numpy(Y.astype(np.float32).reshape(-1, 1)) +train_losses = full_gd(model, criterion, optimizer, X_train, y_train) + +plt.plot(train_losses) +plt.show() + + +# Plot the prediction surface +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], Y) + +# surface plot +with torch.no_grad(): + line = np.linspace(-3, 3, 50) + xx, yy = np.meshgrid(line, line) + Xgrid = np.vstack((xx.flatten(), yy.flatten())).T + Xgrid_torch = torch.from_numpy(Xgrid.astype(np.float32)) + Yhat = model(Xgrid_torch).numpy().flatten() + ax.plot_trisurf(Xgrid[:,0], Xgrid[:,1], Yhat, linewidth=0.2, antialiased=True) + plt.show() + +# Can it extrapolate? 
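+# (the network above was only fit on inputs in (-3, +3); the wider (-5, +5) grid below
+# probes extrapolation, where a plain MLP's predictions usually degrade)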
+# Plot the prediction surface +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.scatter(X[:,0], X[:,1], Y) + +# surface plot +with torch.no_grad(): + line = np.linspace(-5, 5, 50) + xx, yy = np.meshgrid(line, line) + Xgrid = np.vstack((xx.flatten(), yy.flatten())).T + Xgrid_torch = torch.from_numpy(Xgrid.astype(np.float32)) + Yhat = model(Xgrid_torch).numpy().flatten() + ax.plot_trisurf(Xgrid[:,0], Xgrid[:,1], Yhat, linewidth=0.2, antialiased=True) + plt.show() \ No newline at end of file diff --git a/pytorch/extra_reading.txt b/pytorch/extra_reading.txt new file mode 100644 index 00000000..7d5afcf1 --- /dev/null +++ b/pytorch/extra_reading.txt @@ -0,0 +1,27 @@ +Gradient Descent: Convergence Analysis +http://www.stat.cmu.edu/~ryantibs/convexopt-F13/scribes/lec6.pdf + +Deep learning improved by biological activation functions +https://arxiv.org/pdf/1804.11237.pdf + +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift +Sergey Ioffe, Christian Szegedy +https://arxiv.org/abs/1502.03167 + +Dropout: A Simple Way to Prevent Neural Networks from Overfitting +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf + +Convolution arithmetic tutorial +http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html + +On the Practical Computational Power of Finite Precision RNNs for Language Recognition +https://arxiv.org/abs/1805.04908 + +Massive Exploration of Neural Machine Translation Architectures +https://arxiv.org/abs/1703.03906 + +Practical Deep Reinforcement Learning Approach for Stock Trading +https://arxiv.org/abs/1811.07522 + +Inceptionism: Going Deeper into Neural Networks +https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html \ No newline at end of file diff --git a/pytorch/plot_rl_rewards.py b/pytorch/plot_rl_rewards.py new file mode 100644 index 00000000..85cc1b2e --- /dev/null +++ b/pytorch/plot_rl_rewards.py @@ -0,0 +1,16 @@ +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') +args = parser.parse_args() + +a = np.load(f'rl_trader_rewards/{args.mode}.npy') + +print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") + +plt.hist(a, bins=20) +plt.title(args.mode) +plt.show() \ No newline at end of file diff --git a/pytorch/rl_trader.py b/pytorch/rl_trader.py new file mode 100644 index 00000000..fbb96c91 --- /dev/null +++ b/pytorch/rl_trader.py @@ -0,0 +1,441 @@ +import numpy as np +import pandas as pd + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + +from sklearn.preprocessing import StandardScaler + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def 
store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +class MLP(nn.Module): + def __init__(self, n_inputs, n_action, n_hidden_layers=1, hidden_dim=32): + super(MLP, self).__init__() + + M = n_inputs + self.layers = [] + for _ in range(n_hidden_layers): + layer = nn.Linear(M, hidden_dim) + M = hidden_dim + self.layers.append(layer) + self.layers.append(nn.ReLU()) + + # final layer + self.layers.append(nn.Linear(M, n_action)) + self.layers = nn.Sequential(*self.layers) + + def forward(self, X): + return self.layers(X) + + def save_weights(self, path): + torch.save(self.state_dict(), path) + + def load_weights(self, path): + self.load_state_dict(torch.load(path)) + + + +def predict(model, np_states): + with torch.no_grad(): + inputs = torch.from_numpy(np_states.astype(np.float32)) + output = model(inputs) + # print("output:", output) + return output.numpy() + + + +def train_one_step(model, criterion, optimizer, inputs, targets): + # convert to tensors + inputs = torch.from_numpy(inputs.astype(np.float32)) + targets = torch.from_numpy(targets.astype(np.float32)) + + # zero the parameter gradients + optimizer.zero_grad() + + # Forward pass + outputs = model(inputs) + loss = criterion(outputs, targets) + + # Backward and optimize + loss.backward() + optimizer.step() + + + +class MultiStockEnv: + """ + A 3-stock trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. 
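+    # (3**n_stock = 27 combinations in total; the agent's integer action is an index into this list, see _trade below)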
+ # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. [2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = MLP(state_size, action_size) + + # Loss and optimizer + self.criterion = nn.MSELoss() + self.optimizer = torch.optim.Adam(self.model.parameters()) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = predict(self.model, state) + return np.argmax(act_values[0]) # returns action + + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if 
self.memory.size < batch_size:
+      return
+
+    # sample a batch of data from the replay memory
+    minibatch = self.memory.sample_batch(batch_size)
+    states = minibatch['s']
+    actions = minibatch['a']
+    rewards = minibatch['r']
+    next_states = minibatch['s2']
+    done = minibatch['d']
+
+    # Calculate the target: Q(s',a)
+    # use the module-level predict() helper defined above (the MLP itself has no .predict method)
+    target = rewards + (1 - done) * self.gamma * np.amax(predict(self.model, next_states), axis=1)
+
+    # With the PyTorch API, it is simplest to have the target be the
+    # same shape as the predictions.
+    # However, we only need to update the network for the actions
+    # which were actually taken.
+    # We can accomplish this by setting the target to be equal to
+    # the prediction for all values.
+    # Then, only change the targets for the actions taken.
+    # Q(s,a)
+    target_full = predict(self.model, states)
+    target_full[np.arange(batch_size), actions] = target
+
+    # Run one training step
+    train_one_step(self.model, self.criterion, self.optimizer, states, target_full)
+
+    if self.epsilon > self.epsilon_min:
+      self.epsilon *= self.epsilon_decay
+
+
+  def load(self, name):
+    self.model.load_weights(name)
+
+
+  def save(self, name):
+    self.model.save_weights(name)
+
+
+def play_one_episode(agent, env, is_train):
+  # note: after transforming, states are already 1xD
+  state = env.reset()
+  state = scaler.transform([state])
+  done = False
+
+  while not done:
+    action = agent.act(state)
+    next_state, reward, done, info = env.step(action)
+    next_state = scaler.transform([next_state])
+    if is_train == 'train':
+      agent.update_replay_memory(state, action, reward, next_state, done)
+      agent.replay(batch_size)
+    state = next_state
+
+  return info['cur_val']
+
+
+
+if __name__ == '__main__':
+
+  # config
+  models_folder = 'rl_trader_models'
+  rewards_folder = 'rl_trader_rewards'
+  num_episodes = 2000
+  batch_size = 32
+  initial_investment = 20000
+
+
+  parser = argparse.ArgumentParser()
+  parser.add_argument('-m', '--mode', type=str, required=True,
+                      help='either "train" or "test"')
+  args = parser.parse_args()
+
+  maybe_make_dir(models_folder)
+  maybe_make_dir(rewards_folder)
+
+  data = get_data()
+  n_timesteps, n_stocks = data.shape
+
+  n_train = n_timesteps // 2
+
+  train_data = data[:n_train]
+  test_data = data[n_train:]
+
+  env = MultiStockEnv(train_data, initial_investment)
+  state_size = env.state_dim
+  action_size = len(env.action_space)
+  agent = DQNAgent(state_size, action_size)
+  scaler = get_scaler(env)
+
+  # store the final value of the portfolio (end of episode)
+  portfolio_value = []
+
+  if args.mode == 'test':
+    # then load the previous scaler
+    with open(f'{models_folder}/scaler.pkl', 'rb') as f:
+      scaler = pickle.load(f)
+
+    # remake the env with test data
+    env = MultiStockEnv(test_data, initial_investment)
+
+    # make sure epsilon is not 1!
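+    # (reminder: act() above is epsilon-greedy, i.e. with probability epsilon it
+    #  picks a random action via np.random.choice(self.action_size), otherwise
+    #  it returns the argmax over the model's 27 Q-values, so a small epsilon
+    #  keeps test-time behaviour nearly greedy)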
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/dqn.ckpt') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/dqn.ckpt') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) From e3b4d0bcd14c99e6b938d578802bb919018cd298 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 2 Apr 2020 14:32:35 -0400 Subject: [PATCH 174/329] keras update acc > accuracy --- ann_class2/keras_example.py | 4 ++-- ann_class2/keras_functional.py | 4 ++-- cnn_class/keras_example.py | 4 ++-- cnn_class2/fashion.py | 4 ++-- cnn_class2/fashion2.py | 4 ++-- cnn_class2/use_pretrained_weights_resnet.py | 4 ++-- cnn_class2/use_pretrained_weights_vgg.py | 15 +++++++-------- nlp_class2/pos_ner_keras.py | 4 ++-- nlp_class3/attention.py | 15 ++++++++------- nlp_class3/bilstm_mnist.py | 4 ++-- nlp_class3/cnn_toxic.py | 8 ++++++-- nlp_class3/lstm_toxic.py | 12 ++++++------ nlp_class3/memory_network.py | 4 ++-- nlp_class3/poetry.py | 4 ++-- nlp_class3/wseq2seq.py | 9 +++++---- 15 files changed, 52 insertions(+), 47 deletions(-) diff --git a/ann_class2/keras_example.py b/ann_class2/keras_example.py index 0fb28736..aa9a5e19 100644 --- a/ann_class2/keras_example.py +++ b/ann_class2/keras_example.py @@ -75,8 +75,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/ann_class2/keras_functional.py b/ann_class2/keras_functional.py index b8c1a793..265d3f9b 100644 --- a/ann_class2/keras_functional.py +++ b/ann_class2/keras_functional.py @@ -70,8 +70,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class/keras_example.py b/cnn_class/keras_example.py index 4667d84f..d0463588 100644 --- a/cnn_class/keras_example.py +++ b/cnn_class/keras_example.py @@ -113,8 +113,8 @@ def rearrange(X): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class2/fashion.py b/cnn_class2/fashion.py index 858be20e..f1be1654 100644 --- a/cnn_class2/fashion.py +++ b/cnn_class2/fashion.py @@ -101,8 +101,8 @@ def y2indicator(Y): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class2/fashion2.py b/cnn_class2/fashion2.py index 4d2d22d6..d035e974 100644 --- a/cnn_class2/fashion2.py +++ b/cnn_class2/fashion2.py @@ -96,8 +96,8 @@ def 
y2indicator(Y): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/cnn_class2/use_pretrained_weights_resnet.py b/cnn_class2/use_pretrained_weights_resnet.py index 48c6bc23..39bed211 100644 --- a/cnn_class2/use_pretrained_weights_resnet.py +++ b/cnn_class2/use_pretrained_weights_resnet.py @@ -173,8 +173,8 @@ def get_confusion_matrix(data_path, N): plt.show() # accuracies -plt.plot(r.history['acc'], label='train acc') -plt.plot(r.history['val_acc'], label='val acc') +plt.plot(r.history['accuracy'], label='train acc') +plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() diff --git a/cnn_class2/use_pretrained_weights_vgg.py b/cnn_class2/use_pretrained_weights_vgg.py index 01cbd619..542bcb48 100644 --- a/cnn_class2/use_pretrained_weights_vgg.py +++ b/cnn_class2/use_pretrained_weights_vgg.py @@ -31,10 +31,10 @@ # valid_path = '../large_files/blood_cell_images/TEST' # https://www.kaggle.com/moltean/fruits -# train_path = '../large_files/fruits-360/Training' -# valid_path = '../large_files/fruits-360/Validation' -train_path = '../large_files/fruits-360-small/Training' -valid_path = '../large_files/fruits-360-small/Validation' +train_path = '../large_files/fruits-360/Training' +valid_path = '../large_files/fruits-360/Validation' +# train_path = '../large_files/fruits-360-small/Training' +# valid_path = '../large_files/fruits-360-small/Validation' # useful for getting number of files image_files = glob(train_path + '/*/*.jp*g') @@ -45,7 +45,7 @@ # look at an image for fun -plt.imshow(image.load_img(np.random.choice(image_files))) +plt.imshow(image.img_to_array(image.load_img(np.random.choice(image_files))).astype('uint8')) plt.show() @@ -76,7 +76,6 @@ ) - # create an instance of ImageDataGenerator gen = ImageDataGenerator( rotation_range=20, @@ -172,8 +171,8 @@ def get_confusion_matrix(data_path, N): plt.show() # accuracies -plt.plot(r.history['acc'], label='train acc') -plt.plot(r.history['val_acc'], label='val acc') +plt.plot(r.history['accuracy'], label='train acc') +plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() diff --git a/nlp_class2/pos_ner_keras.py b/nlp_class2/pos_ner_keras.py index a150fc29..7a1335e1 100644 --- a/nlp_class2/pos_ner_keras.py +++ b/nlp_class2/pos_ner_keras.py @@ -217,8 +217,8 @@ def get_data_ner(split_sequences=False): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index 8e7e3735..550b5bcb 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -34,10 +34,10 @@ def softmax_over_time(x): # config BATCH_SIZE = 64 -EPOCHS = 100 -LATENT_DIM = 256 -LATENT_DIM_DECODER = 256 # idea: make it different to ensure things all fit together properly! -NUM_SAMPLES = 10000 +EPOCHS = 30 +LATENT_DIM = 400 +LATENT_DIM_DECODER = 400 # idea: make it different to ensure things all fit together properly! 
+NUM_SAMPLES = 20000 MAX_SEQUENCE_LENGTH = 100 MAX_NUM_WORDS = 20000 EMBEDDING_DIM = 100 @@ -190,7 +190,8 @@ def softmax_over_time(x): # assign the values for i, d in enumerate(decoder_targets): for t, word in enumerate(d): - decoder_targets_one_hot[i, t, word] = 1 + if word > 0: + decoder_targets_one_hot[i, t, word] = 1 @@ -367,8 +368,8 @@ def acc(y_true, y_pred): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/bilstm_mnist.py b/nlp_class3/bilstm_mnist.py index 4002b2ae..03e3752d 100644 --- a/nlp_class3/bilstm_mnist.py +++ b/nlp_class3/bilstm_mnist.py @@ -97,8 +97,8 @@ def get_mnist(limit=None): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/cnn_toxic.py b/nlp_class3/cnn_toxic.py index 5ecf0443..57d86d1e 100644 --- a/nlp_class3/cnn_toxic.py +++ b/nlp_class3/cnn_toxic.py @@ -70,11 +70,15 @@ s = sorted(len(s) for s in sequences) print("median sequence length:", s[len(s) // 2]) +print("max word index:", max(max(seq) for seq in sequences if len(seq) > 0)) + # get word -> integer mapping word2idx = tokenizer.word_index print('Found %s unique tokens.' % len(word2idx)) +# exit() + # pad sequences so that we get a N x T matrix data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) @@ -144,8 +148,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/lstm_toxic.py b/nlp_class3/lstm_toxic.py index 8dc41f97..71f4947d 100644 --- a/nlp_class3/lstm_toxic.py +++ b/nlp_class3/lstm_toxic.py @@ -20,9 +20,9 @@ from sklearn.metrics import roc_auc_score import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +# if len(K.tensorflow_backend._get_available_gpus()) > 0: +# from keras.layers import CuDNNLSTM as LSTM +# from keras.layers import CuDNNGRU as GRU # Download the data: @@ -124,7 +124,7 @@ model.compile( loss='binary_crossentropy', optimizer=Adam(lr=0.01), - metrics=['accuracy'] + metrics=['accuracy'], ) @@ -144,8 +144,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/memory_network.py b/nlp_class3/memory_network.py index a5603f5c..52f4c291 100644 --- a/nlp_class3/memory_network.py +++ b/nlp_class3/memory_network.py @@ -425,7 +425,7 @@ def hop(query, story): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/poetry.py b/nlp_class3/poetry.py index 5b3cfac7..7dabafe3 100644 --- a/nlp_class3/poetry.py +++ b/nlp_class3/poetry.py @@ -160,8 +160,8 @@ plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') 
-plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index e83d669a..52c0a65e 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -165,7 +165,8 @@ # assign the values for i, d in enumerate(decoder_targets): for t, word in enumerate(d): - decoder_targets_one_hot[i, t, word] = 1 + if word != 0: + decoder_targets_one_hot[i, t, word] = 1 @@ -190,7 +191,7 @@ # this word embedding will not use pre-trained vectors # although you could -decoder_embedding = Embedding(num_words_output, LATENT_DIM) +decoder_embedding = Embedding(num_words_output, EMBEDDING_DIM) decoder_inputs_x = decoder_embedding(decoder_inputs_placeholder) # since the decoder is a "to-many" model we want to have @@ -263,8 +264,8 @@ def acc(y_true, y_pred): plt.show() # accuracies -plt.plot(r.history['acc'], label='acc') -plt.plot(r.history['val_acc'], label='val_acc') +plt.plot(r.history['accuracy'], label='acc') +plt.plot(r.history['val_accuracy'], label='val_acc') plt.legend() plt.show() From 62c20fd1f8833f4671f579baf57222c1aa9881bc Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 2 Apr 2020 23:06:57 -0400 Subject: [PATCH 175/329] add file --- tf2.0/auto-mpg.data | 398 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 398 insertions(+) create mode 100644 tf2.0/auto-mpg.data diff --git a/tf2.0/auto-mpg.data b/tf2.0/auto-mpg.data new file mode 100644 index 00000000..33404b06 --- /dev/null +++ b/tf2.0/auto-mpg.data @@ -0,0 +1,398 @@ +18.0 8 307.0 130.0 3504. 12.0 70 1 "chevrolet chevelle malibu" +15.0 8 350.0 165.0 3693. 11.5 70 1 "buick skylark 320" +18.0 8 318.0 150.0 3436. 11.0 70 1 "plymouth satellite" +16.0 8 304.0 150.0 3433. 12.0 70 1 "amc rebel sst" +17.0 8 302.0 140.0 3449. 10.5 70 1 "ford torino" +15.0 8 429.0 198.0 4341. 10.0 70 1 "ford galaxie 500" +14.0 8 454.0 220.0 4354. 9.0 70 1 "chevrolet impala" +14.0 8 440.0 215.0 4312. 8.5 70 1 "plymouth fury iii" +14.0 8 455.0 225.0 4425. 10.0 70 1 "pontiac catalina" +15.0 8 390.0 190.0 3850. 8.5 70 1 "amc ambassador dpl" +15.0 8 383.0 170.0 3563. 10.0 70 1 "dodge challenger se" +14.0 8 340.0 160.0 3609. 8.0 70 1 "plymouth 'cuda 340" +15.0 8 400.0 150.0 3761. 9.5 70 1 "chevrolet monte carlo" +14.0 8 455.0 225.0 3086. 10.0 70 1 "buick estate wagon (sw)" +24.0 4 113.0 95.00 2372. 15.0 70 3 "toyota corona mark ii" +22.0 6 198.0 95.00 2833. 15.5 70 1 "plymouth duster" +18.0 6 199.0 97.00 2774. 15.5 70 1 "amc hornet" +21.0 6 200.0 85.00 2587. 16.0 70 1 "ford maverick" +27.0 4 97.00 88.00 2130. 14.5 70 3 "datsun pl510" +26.0 4 97.00 46.00 1835. 20.5 70 2 "volkswagen 1131 deluxe sedan" +25.0 4 110.0 87.00 2672. 17.5 70 2 "peugeot 504" +24.0 4 107.0 90.00 2430. 14.5 70 2 "audi 100 ls" +25.0 4 104.0 95.00 2375. 17.5 70 2 "saab 99e" +26.0 4 121.0 113.0 2234. 12.5 70 2 "bmw 2002" +21.0 6 199.0 90.00 2648. 15.0 70 1 "amc gremlin" +10.0 8 360.0 215.0 4615. 14.0 70 1 "ford f250" +10.0 8 307.0 200.0 4376. 15.0 70 1 "chevy c20" +11.0 8 318.0 210.0 4382. 13.5 70 1 "dodge d200" +9.0 8 304.0 193.0 4732. 18.5 70 1 "hi 1200d" +27.0 4 97.00 88.00 2130. 14.5 71 3 "datsun pl510" +28.0 4 140.0 90.00 2264. 15.5 71 1 "chevrolet vega 2300" +25.0 4 113.0 95.00 2228. 14.0 71 3 "toyota corona" +25.0 4 98.00 ? 2046. 19.0 71 1 "ford pinto" +19.0 6 232.0 100.0 2634. 13.0 71 1 "amc gremlin" +16.0 6 225.0 105.0 3439. 15.5 71 1 "plymouth satellite custom" +17.0 6 250.0 100.0 3329. 
15.5 71 1 "chevrolet chevelle malibu" +19.0 6 250.0 88.00 3302. 15.5 71 1 "ford torino 500" +18.0 6 232.0 100.0 3288. 15.5 71 1 "amc matador" +14.0 8 350.0 165.0 4209. 12.0 71 1 "chevrolet impala" +14.0 8 400.0 175.0 4464. 11.5 71 1 "pontiac catalina brougham" +14.0 8 351.0 153.0 4154. 13.5 71 1 "ford galaxie 500" +14.0 8 318.0 150.0 4096. 13.0 71 1 "plymouth fury iii" +12.0 8 383.0 180.0 4955. 11.5 71 1 "dodge monaco (sw)" +13.0 8 400.0 170.0 4746. 12.0 71 1 "ford country squire (sw)" +13.0 8 400.0 175.0 5140. 12.0 71 1 "pontiac safari (sw)" +18.0 6 258.0 110.0 2962. 13.5 71 1 "amc hornet sportabout (sw)" +22.0 4 140.0 72.00 2408. 19.0 71 1 "chevrolet vega (sw)" +19.0 6 250.0 100.0 3282. 15.0 71 1 "pontiac firebird" +18.0 6 250.0 88.00 3139. 14.5 71 1 "ford mustang" +23.0 4 122.0 86.00 2220. 14.0 71 1 "mercury capri 2000" +28.0 4 116.0 90.00 2123. 14.0 71 2 "opel 1900" +30.0 4 79.00 70.00 2074. 19.5 71 2 "peugeot 304" +30.0 4 88.00 76.00 2065. 14.5 71 2 "fiat 124b" +31.0 4 71.00 65.00 1773. 19.0 71 3 "toyota corolla 1200" +35.0 4 72.00 69.00 1613. 18.0 71 3 "datsun 1200" +27.0 4 97.00 60.00 1834. 19.0 71 2 "volkswagen model 111" +26.0 4 91.00 70.00 1955. 20.5 71 1 "plymouth cricket" +24.0 4 113.0 95.00 2278. 15.5 72 3 "toyota corona hardtop" +25.0 4 97.50 80.00 2126. 17.0 72 1 "dodge colt hardtop" +23.0 4 97.00 54.00 2254. 23.5 72 2 "volkswagen type 3" +20.0 4 140.0 90.00 2408. 19.5 72 1 "chevrolet vega" +21.0 4 122.0 86.00 2226. 16.5 72 1 "ford pinto runabout" +13.0 8 350.0 165.0 4274. 12.0 72 1 "chevrolet impala" +14.0 8 400.0 175.0 4385. 12.0 72 1 "pontiac catalina" +15.0 8 318.0 150.0 4135. 13.5 72 1 "plymouth fury iii" +14.0 8 351.0 153.0 4129. 13.0 72 1 "ford galaxie 500" +17.0 8 304.0 150.0 3672. 11.5 72 1 "amc ambassador sst" +11.0 8 429.0 208.0 4633. 11.0 72 1 "mercury marquis" +13.0 8 350.0 155.0 4502. 13.5 72 1 "buick lesabre custom" +12.0 8 350.0 160.0 4456. 13.5 72 1 "oldsmobile delta 88 royale" +13.0 8 400.0 190.0 4422. 12.5 72 1 "chrysler newport royal" +19.0 3 70.00 97.00 2330. 13.5 72 3 "mazda rx2 coupe" +15.0 8 304.0 150.0 3892. 12.5 72 1 "amc matador (sw)" +13.0 8 307.0 130.0 4098. 14.0 72 1 "chevrolet chevelle concours (sw)" +13.0 8 302.0 140.0 4294. 16.0 72 1 "ford gran torino (sw)" +14.0 8 318.0 150.0 4077. 14.0 72 1 "plymouth satellite custom (sw)" +18.0 4 121.0 112.0 2933. 14.5 72 2 "volvo 145e (sw)" +22.0 4 121.0 76.00 2511. 18.0 72 2 "volkswagen 411 (sw)" +21.0 4 120.0 87.00 2979. 19.5 72 2 "peugeot 504 (sw)" +26.0 4 96.00 69.00 2189. 18.0 72 2 "renault 12 (sw)" +22.0 4 122.0 86.00 2395. 16.0 72 1 "ford pinto (sw)" +28.0 4 97.00 92.00 2288. 17.0 72 3 "datsun 510 (sw)" +23.0 4 120.0 97.00 2506. 14.5 72 3 "toyouta corona mark ii (sw)" +28.0 4 98.00 80.00 2164. 15.0 72 1 "dodge colt (sw)" +27.0 4 97.00 88.00 2100. 16.5 72 3 "toyota corolla 1600 (sw)" +13.0 8 350.0 175.0 4100. 13.0 73 1 "buick century 350" +14.0 8 304.0 150.0 3672. 11.5 73 1 "amc matador" +13.0 8 350.0 145.0 3988. 13.0 73 1 "chevrolet malibu" +14.0 8 302.0 137.0 4042. 14.5 73 1 "ford gran torino" +15.0 8 318.0 150.0 3777. 12.5 73 1 "dodge coronet custom" +12.0 8 429.0 198.0 4952. 11.5 73 1 "mercury marquis brougham" +13.0 8 400.0 150.0 4464. 12.0 73 1 "chevrolet caprice classic" +13.0 8 351.0 158.0 4363. 13.0 73 1 "ford ltd" +14.0 8 318.0 150.0 4237. 14.5 73 1 "plymouth fury gran sedan" +13.0 8 440.0 215.0 4735. 11.0 73 1 "chrysler new yorker brougham" +12.0 8 455.0 225.0 4951. 11.0 73 1 "buick electra 225 custom" +13.0 8 360.0 175.0 3821. 11.0 73 1 "amc ambassador brougham" +18.0 6 225.0 105.0 3121. 
16.5 73 1 "plymouth valiant" +16.0 6 250.0 100.0 3278. 18.0 73 1 "chevrolet nova custom" +18.0 6 232.0 100.0 2945. 16.0 73 1 "amc hornet" +18.0 6 250.0 88.00 3021. 16.5 73 1 "ford maverick" +23.0 6 198.0 95.00 2904. 16.0 73 1 "plymouth duster" +26.0 4 97.00 46.00 1950. 21.0 73 2 "volkswagen super beetle" +11.0 8 400.0 150.0 4997. 14.0 73 1 "chevrolet impala" +12.0 8 400.0 167.0 4906. 12.5 73 1 "ford country" +13.0 8 360.0 170.0 4654. 13.0 73 1 "plymouth custom suburb" +12.0 8 350.0 180.0 4499. 12.5 73 1 "oldsmobile vista cruiser" +18.0 6 232.0 100.0 2789. 15.0 73 1 "amc gremlin" +20.0 4 97.00 88.00 2279. 19.0 73 3 "toyota carina" +21.0 4 140.0 72.00 2401. 19.5 73 1 "chevrolet vega" +22.0 4 108.0 94.00 2379. 16.5 73 3 "datsun 610" +18.0 3 70.00 90.00 2124. 13.5 73 3 "maxda rx3" +19.0 4 122.0 85.00 2310. 18.5 73 1 "ford pinto" +21.0 6 155.0 107.0 2472. 14.0 73 1 "mercury capri v6" +26.0 4 98.00 90.00 2265. 15.5 73 2 "fiat 124 sport coupe" +15.0 8 350.0 145.0 4082. 13.0 73 1 "chevrolet monte carlo s" +16.0 8 400.0 230.0 4278. 9.50 73 1 "pontiac grand prix" +29.0 4 68.00 49.00 1867. 19.5 73 2 "fiat 128" +24.0 4 116.0 75.00 2158. 15.5 73 2 "opel manta" +20.0 4 114.0 91.00 2582. 14.0 73 2 "audi 100ls" +19.0 4 121.0 112.0 2868. 15.5 73 2 "volvo 144ea" +15.0 8 318.0 150.0 3399. 11.0 73 1 "dodge dart custom" +24.0 4 121.0 110.0 2660. 14.0 73 2 "saab 99le" +20.0 6 156.0 122.0 2807. 13.5 73 3 "toyota mark ii" +11.0 8 350.0 180.0 3664. 11.0 73 1 "oldsmobile omega" +20.0 6 198.0 95.00 3102. 16.5 74 1 "plymouth duster" +21.0 6 200.0 ? 2875. 17.0 74 1 "ford maverick" +19.0 6 232.0 100.0 2901. 16.0 74 1 "amc hornet" +15.0 6 250.0 100.0 3336. 17.0 74 1 "chevrolet nova" +31.0 4 79.00 67.00 1950. 19.0 74 3 "datsun b210" +26.0 4 122.0 80.00 2451. 16.5 74 1 "ford pinto" +32.0 4 71.00 65.00 1836. 21.0 74 3 "toyota corolla 1200" +25.0 4 140.0 75.00 2542. 17.0 74 1 "chevrolet vega" +16.0 6 250.0 100.0 3781. 17.0 74 1 "chevrolet chevelle malibu classic" +16.0 6 258.0 110.0 3632. 18.0 74 1 "amc matador" +18.0 6 225.0 105.0 3613. 16.5 74 1 "plymouth satellite sebring" +16.0 8 302.0 140.0 4141. 14.0 74 1 "ford gran torino" +13.0 8 350.0 150.0 4699. 14.5 74 1 "buick century luxus (sw)" +14.0 8 318.0 150.0 4457. 13.5 74 1 "dodge coronet custom (sw)" +14.0 8 302.0 140.0 4638. 16.0 74 1 "ford gran torino (sw)" +14.0 8 304.0 150.0 4257. 15.5 74 1 "amc matador (sw)" +29.0 4 98.00 83.00 2219. 16.5 74 2 "audi fox" +26.0 4 79.00 67.00 1963. 15.5 74 2 "volkswagen dasher" +26.0 4 97.00 78.00 2300. 14.5 74 2 "opel manta" +31.0 4 76.00 52.00 1649. 16.5 74 3 "toyota corona" +32.0 4 83.00 61.00 2003. 19.0 74 3 "datsun 710" +28.0 4 90.00 75.00 2125. 14.5 74 1 "dodge colt" +24.0 4 90.00 75.00 2108. 15.5 74 2 "fiat 128" +26.0 4 116.0 75.00 2246. 14.0 74 2 "fiat 124 tc" +24.0 4 120.0 97.00 2489. 15.0 74 3 "honda civic" +26.0 4 108.0 93.00 2391. 15.5 74 3 "subaru" +31.0 4 79.00 67.00 2000. 16.0 74 2 "fiat x1.9" +19.0 6 225.0 95.00 3264. 16.0 75 1 "plymouth valiant custom" +18.0 6 250.0 105.0 3459. 16.0 75 1 "chevrolet nova" +15.0 6 250.0 72.00 3432. 21.0 75 1 "mercury monarch" +15.0 6 250.0 72.00 3158. 19.5 75 1 "ford maverick" +16.0 8 400.0 170.0 4668. 11.5 75 1 "pontiac catalina" +15.0 8 350.0 145.0 4440. 14.0 75 1 "chevrolet bel air" +16.0 8 318.0 150.0 4498. 14.5 75 1 "plymouth grand fury" +14.0 8 351.0 148.0 4657. 13.5 75 1 "ford ltd" +17.0 6 231.0 110.0 3907. 21.0 75 1 "buick century" +16.0 6 250.0 105.0 3897. 18.5 75 1 "chevroelt chevelle malibu" +15.0 6 258.0 110.0 3730. 19.0 75 1 "amc matador" +18.0 6 225.0 95.00 3785. 
19.0 75 1 "plymouth fury" +21.0 6 231.0 110.0 3039. 15.0 75 1 "buick skyhawk" +20.0 8 262.0 110.0 3221. 13.5 75 1 "chevrolet monza 2+2" +13.0 8 302.0 129.0 3169. 12.0 75 1 "ford mustang ii" +29.0 4 97.00 75.00 2171. 16.0 75 3 "toyota corolla" +23.0 4 140.0 83.00 2639. 17.0 75 1 "ford pinto" +20.0 6 232.0 100.0 2914. 16.0 75 1 "amc gremlin" +23.0 4 140.0 78.00 2592. 18.5 75 1 "pontiac astro" +24.0 4 134.0 96.00 2702. 13.5 75 3 "toyota corona" +25.0 4 90.00 71.00 2223. 16.5 75 2 "volkswagen dasher" +24.0 4 119.0 97.00 2545. 17.0 75 3 "datsun 710" +18.0 6 171.0 97.00 2984. 14.5 75 1 "ford pinto" +29.0 4 90.00 70.00 1937. 14.0 75 2 "volkswagen rabbit" +19.0 6 232.0 90.00 3211. 17.0 75 1 "amc pacer" +23.0 4 115.0 95.00 2694. 15.0 75 2 "audi 100ls" +23.0 4 120.0 88.00 2957. 17.0 75 2 "peugeot 504" +22.0 4 121.0 98.00 2945. 14.5 75 2 "volvo 244dl" +25.0 4 121.0 115.0 2671. 13.5 75 2 "saab 99le" +33.0 4 91.00 53.00 1795. 17.5 75 3 "honda civic cvcc" +28.0 4 107.0 86.00 2464. 15.5 76 2 "fiat 131" +25.0 4 116.0 81.00 2220. 16.9 76 2 "opel 1900" +25.0 4 140.0 92.00 2572. 14.9 76 1 "capri ii" +26.0 4 98.00 79.00 2255. 17.7 76 1 "dodge colt" +27.0 4 101.0 83.00 2202. 15.3 76 2 "renault 12tl" +17.5 8 305.0 140.0 4215. 13.0 76 1 "chevrolet chevelle malibu classic" +16.0 8 318.0 150.0 4190. 13.0 76 1 "dodge coronet brougham" +15.5 8 304.0 120.0 3962. 13.9 76 1 "amc matador" +14.5 8 351.0 152.0 4215. 12.8 76 1 "ford gran torino" +22.0 6 225.0 100.0 3233. 15.4 76 1 "plymouth valiant" +22.0 6 250.0 105.0 3353. 14.5 76 1 "chevrolet nova" +24.0 6 200.0 81.00 3012. 17.6 76 1 "ford maverick" +22.5 6 232.0 90.00 3085. 17.6 76 1 "amc hornet" +29.0 4 85.00 52.00 2035. 22.2 76 1 "chevrolet chevette" +24.5 4 98.00 60.00 2164. 22.1 76 1 "chevrolet woody" +29.0 4 90.00 70.00 1937. 14.2 76 2 "vw rabbit" +33.0 4 91.00 53.00 1795. 17.4 76 3 "honda civic" +20.0 6 225.0 100.0 3651. 17.7 76 1 "dodge aspen se" +18.0 6 250.0 78.00 3574. 21.0 76 1 "ford granada ghia" +18.5 6 250.0 110.0 3645. 16.2 76 1 "pontiac ventura sj" +17.5 6 258.0 95.00 3193. 17.8 76 1 "amc pacer d/l" +29.5 4 97.00 71.00 1825. 12.2 76 2 "volkswagen rabbit" +32.0 4 85.00 70.00 1990. 17.0 76 3 "datsun b-210" +28.0 4 97.00 75.00 2155. 16.4 76 3 "toyota corolla" +26.5 4 140.0 72.00 2565. 13.6 76 1 "ford pinto" +20.0 4 130.0 102.0 3150. 15.7 76 2 "volvo 245" +13.0 8 318.0 150.0 3940. 13.2 76 1 "plymouth volare premier v8" +19.0 4 120.0 88.00 3270. 21.9 76 2 "peugeot 504" +19.0 6 156.0 108.0 2930. 15.5 76 3 "toyota mark ii" +16.5 6 168.0 120.0 3820. 16.7 76 2 "mercedes-benz 280s" +16.5 8 350.0 180.0 4380. 12.1 76 1 "cadillac seville" +13.0 8 350.0 145.0 4055. 12.0 76 1 "chevy c10" +13.0 8 302.0 130.0 3870. 15.0 76 1 "ford f108" +13.0 8 318.0 150.0 3755. 14.0 76 1 "dodge d100" +31.5 4 98.00 68.00 2045. 18.5 77 3 "honda accord cvcc" +30.0 4 111.0 80.00 2155. 14.8 77 1 "buick opel isuzu deluxe" +36.0 4 79.00 58.00 1825. 18.6 77 2 "renault 5 gtl" +25.5 4 122.0 96.00 2300. 15.5 77 1 "plymouth arrow gs" +33.5 4 85.00 70.00 1945. 16.8 77 3 "datsun f-10 hatchback" +17.5 8 305.0 145.0 3880. 12.5 77 1 "chevrolet caprice classic" +17.0 8 260.0 110.0 4060. 19.0 77 1 "oldsmobile cutlass supreme" +15.5 8 318.0 145.0 4140. 13.7 77 1 "dodge monaco brougham" +15.0 8 302.0 130.0 4295. 14.9 77 1 "mercury cougar brougham" +17.5 6 250.0 110.0 3520. 16.4 77 1 "chevrolet concours" +20.5 6 231.0 105.0 3425. 16.9 77 1 "buick skylark" +19.0 6 225.0 100.0 3630. 17.7 77 1 "plymouth volare custom" +18.5 6 250.0 98.00 3525. 19.0 77 1 "ford granada" +16.0 8 400.0 180.0 4220. 
11.1 77 1 "pontiac grand prix lj" +15.5 8 350.0 170.0 4165. 11.4 77 1 "chevrolet monte carlo landau" +15.5 8 400.0 190.0 4325. 12.2 77 1 "chrysler cordoba" +16.0 8 351.0 149.0 4335. 14.5 77 1 "ford thunderbird" +29.0 4 97.00 78.00 1940. 14.5 77 2 "volkswagen rabbit custom" +24.5 4 151.0 88.00 2740. 16.0 77 1 "pontiac sunbird coupe" +26.0 4 97.00 75.00 2265. 18.2 77 3 "toyota corolla liftback" +25.5 4 140.0 89.00 2755. 15.8 77 1 "ford mustang ii 2+2" +30.5 4 98.00 63.00 2051. 17.0 77 1 "chevrolet chevette" +33.5 4 98.00 83.00 2075. 15.9 77 1 "dodge colt m/m" +30.0 4 97.00 67.00 1985. 16.4 77 3 "subaru dl" +30.5 4 97.00 78.00 2190. 14.1 77 2 "volkswagen dasher" +22.0 6 146.0 97.00 2815. 14.5 77 3 "datsun 810" +21.5 4 121.0 110.0 2600. 12.8 77 2 "bmw 320i" +21.5 3 80.00 110.0 2720. 13.5 77 3 "mazda rx-4" +43.1 4 90.00 48.00 1985. 21.5 78 2 "volkswagen rabbit custom diesel" +36.1 4 98.00 66.00 1800. 14.4 78 1 "ford fiesta" +32.8 4 78.00 52.00 1985. 19.4 78 3 "mazda glc deluxe" +39.4 4 85.00 70.00 2070. 18.6 78 3 "datsun b210 gx" +36.1 4 91.00 60.00 1800. 16.4 78 3 "honda civic cvcc" +19.9 8 260.0 110.0 3365. 15.5 78 1 "oldsmobile cutlass salon brougham" +19.4 8 318.0 140.0 3735. 13.2 78 1 "dodge diplomat" +20.2 8 302.0 139.0 3570. 12.8 78 1 "mercury monarch ghia" +19.2 6 231.0 105.0 3535. 19.2 78 1 "pontiac phoenix lj" +20.5 6 200.0 95.00 3155. 18.2 78 1 "chevrolet malibu" +20.2 6 200.0 85.00 2965. 15.8 78 1 "ford fairmont (auto)" +25.1 4 140.0 88.00 2720. 15.4 78 1 "ford fairmont (man)" +20.5 6 225.0 100.0 3430. 17.2 78 1 "plymouth volare" +19.4 6 232.0 90.00 3210. 17.2 78 1 "amc concord" +20.6 6 231.0 105.0 3380. 15.8 78 1 "buick century special" +20.8 6 200.0 85.00 3070. 16.7 78 1 "mercury zephyr" +18.6 6 225.0 110.0 3620. 18.7 78 1 "dodge aspen" +18.1 6 258.0 120.0 3410. 15.1 78 1 "amc concord d/l" +19.2 8 305.0 145.0 3425. 13.2 78 1 "chevrolet monte carlo landau" +17.7 6 231.0 165.0 3445. 13.4 78 1 "buick regal sport coupe (turbo)" +18.1 8 302.0 139.0 3205. 11.2 78 1 "ford futura" +17.5 8 318.0 140.0 4080. 13.7 78 1 "dodge magnum xe" +30.0 4 98.00 68.00 2155. 16.5 78 1 "chevrolet chevette" +27.5 4 134.0 95.00 2560. 14.2 78 3 "toyota corona" +27.2 4 119.0 97.00 2300. 14.7 78 3 "datsun 510" +30.9 4 105.0 75.00 2230. 14.5 78 1 "dodge omni" +21.1 4 134.0 95.00 2515. 14.8 78 3 "toyota celica gt liftback" +23.2 4 156.0 105.0 2745. 16.7 78 1 "plymouth sapporo" +23.8 4 151.0 85.00 2855. 17.6 78 1 "oldsmobile starfire sx" +23.9 4 119.0 97.00 2405. 14.9 78 3 "datsun 200-sx" +20.3 5 131.0 103.0 2830. 15.9 78 2 "audi 5000" +17.0 6 163.0 125.0 3140. 13.6 78 2 "volvo 264gl" +21.6 4 121.0 115.0 2795. 15.7 78 2 "saab 99gle" +16.2 6 163.0 133.0 3410. 15.8 78 2 "peugeot 604sl" +31.5 4 89.00 71.00 1990. 14.9 78 2 "volkswagen scirocco" +29.5 4 98.00 68.00 2135. 16.6 78 3 "honda accord lx" +21.5 6 231.0 115.0 3245. 15.4 79 1 "pontiac lemans v6" +19.8 6 200.0 85.00 2990. 18.2 79 1 "mercury zephyr 6" +22.3 4 140.0 88.00 2890. 17.3 79 1 "ford fairmont 4" +20.2 6 232.0 90.00 3265. 18.2 79 1 "amc concord dl 6" +20.6 6 225.0 110.0 3360. 16.6 79 1 "dodge aspen 6" +17.0 8 305.0 130.0 3840. 15.4 79 1 "chevrolet caprice classic" +17.6 8 302.0 129.0 3725. 13.4 79 1 "ford ltd landau" +16.5 8 351.0 138.0 3955. 13.2 79 1 "mercury grand marquis" +18.2 8 318.0 135.0 3830. 15.2 79 1 "dodge st. regis" +16.9 8 350.0 155.0 4360. 14.9 79 1 "buick estate wagon (sw)" +15.5 8 351.0 142.0 4054. 14.3 79 1 "ford country squire (sw)" +19.2 8 267.0 125.0 3605. 15.0 79 1 "chevrolet malibu classic (sw)" +18.5 8 360.0 150.0 3940. 
13.0 79 1 "chrysler lebaron town @ country (sw)" +31.9 4 89.00 71.00 1925. 14.0 79 2 "vw rabbit custom" +34.1 4 86.00 65.00 1975. 15.2 79 3 "maxda glc deluxe" +35.7 4 98.00 80.00 1915. 14.4 79 1 "dodge colt hatchback custom" +27.4 4 121.0 80.00 2670. 15.0 79 1 "amc spirit dl" +25.4 5 183.0 77.00 3530. 20.1 79 2 "mercedes benz 300d" +23.0 8 350.0 125.0 3900. 17.4 79 1 "cadillac eldorado" +27.2 4 141.0 71.00 3190. 24.8 79 2 "peugeot 504" +23.9 8 260.0 90.00 3420. 22.2 79 1 "oldsmobile cutlass salon brougham" +34.2 4 105.0 70.00 2200. 13.2 79 1 "plymouth horizon" +34.5 4 105.0 70.00 2150. 14.9 79 1 "plymouth horizon tc3" +31.8 4 85.00 65.00 2020. 19.2 79 3 "datsun 210" +37.3 4 91.00 69.00 2130. 14.7 79 2 "fiat strada custom" +28.4 4 151.0 90.00 2670. 16.0 79 1 "buick skylark limited" +28.8 6 173.0 115.0 2595. 11.3 79 1 "chevrolet citation" +26.8 6 173.0 115.0 2700. 12.9 79 1 "oldsmobile omega brougham" +33.5 4 151.0 90.00 2556. 13.2 79 1 "pontiac phoenix" +41.5 4 98.00 76.00 2144. 14.7 80 2 "vw rabbit" +38.1 4 89.00 60.00 1968. 18.8 80 3 "toyota corolla tercel" +32.1 4 98.00 70.00 2120. 15.5 80 1 "chevrolet chevette" +37.2 4 86.00 65.00 2019. 16.4 80 3 "datsun 310" +28.0 4 151.0 90.00 2678. 16.5 80 1 "chevrolet citation" +26.4 4 140.0 88.00 2870. 18.1 80 1 "ford fairmont" +24.3 4 151.0 90.00 3003. 20.1 80 1 "amc concord" +19.1 6 225.0 90.00 3381. 18.7 80 1 "dodge aspen" +34.3 4 97.00 78.00 2188. 15.8 80 2 "audi 4000" +29.8 4 134.0 90.00 2711. 15.5 80 3 "toyota corona liftback" +31.3 4 120.0 75.00 2542. 17.5 80 3 "mazda 626" +37.0 4 119.0 92.00 2434. 15.0 80 3 "datsun 510 hatchback" +32.2 4 108.0 75.00 2265. 15.2 80 3 "toyota corolla" +46.6 4 86.00 65.00 2110. 17.9 80 3 "mazda glc" +27.9 4 156.0 105.0 2800. 14.4 80 1 "dodge colt" +40.8 4 85.00 65.00 2110. 19.2 80 3 "datsun 210" +44.3 4 90.00 48.00 2085. 21.7 80 2 "vw rabbit c (diesel)" +43.4 4 90.00 48.00 2335. 23.7 80 2 "vw dasher (diesel)" +36.4 5 121.0 67.00 2950. 19.9 80 2 "audi 5000s (diesel)" +30.0 4 146.0 67.00 3250. 21.8 80 2 "mercedes-benz 240d" +44.6 4 91.00 67.00 1850. 13.8 80 3 "honda civic 1500 gl" +40.9 4 85.00 ? 1835. 17.3 80 2 "renault lecar deluxe" +33.8 4 97.00 67.00 2145. 18.0 80 3 "subaru dl" +29.8 4 89.00 62.00 1845. 15.3 80 2 "vokswagen rabbit" +32.7 6 168.0 132.0 2910. 11.4 80 3 "datsun 280-zx" +23.7 3 70.00 100.0 2420. 12.5 80 3 "mazda rx-7 gs" +35.0 4 122.0 88.00 2500. 15.1 80 2 "triumph tr7 coupe" +23.6 4 140.0 ? 2905. 14.3 80 1 "ford mustang cobra" +32.4 4 107.0 72.00 2290. 17.0 80 3 "honda accord" +27.2 4 135.0 84.00 2490. 15.7 81 1 "plymouth reliant" +26.6 4 151.0 84.00 2635. 16.4 81 1 "buick skylark" +25.8 4 156.0 92.00 2620. 14.4 81 1 "dodge aries wagon (sw)" +23.5 6 173.0 110.0 2725. 12.6 81 1 "chevrolet citation" +30.0 4 135.0 84.00 2385. 12.9 81 1 "plymouth reliant" +39.1 4 79.00 58.00 1755. 16.9 81 3 "toyota starlet" +39.0 4 86.00 64.00 1875. 16.4 81 1 "plymouth champ" +35.1 4 81.00 60.00 1760. 16.1 81 3 "honda civic 1300" +32.3 4 97.00 67.00 2065. 17.8 81 3 "subaru" +37.0 4 85.00 65.00 1975. 19.4 81 3 "datsun 210 mpg" +37.7 4 89.00 62.00 2050. 17.3 81 3 "toyota tercel" +34.1 4 91.00 68.00 1985. 16.0 81 3 "mazda glc 4" +34.7 4 105.0 63.00 2215. 14.9 81 1 "plymouth horizon 4" +34.4 4 98.00 65.00 2045. 16.2 81 1 "ford escort 4w" +29.9 4 98.00 65.00 2380. 20.7 81 1 "ford escort 2h" +33.0 4 105.0 74.00 2190. 14.2 81 2 "volkswagen jetta" +34.5 4 100.0 ? 2320. 15.8 81 2 "renault 18i" +33.7 4 107.0 75.00 2210. 14.4 81 3 "honda prelude" +32.4 4 108.0 75.00 2350. 16.8 81 3 "toyota corolla" +32.9 4 119.0 100.0 2615. 
14.8 81 3 "datsun 200sx" +31.6 4 120.0 74.00 2635. 18.3 81 3 "mazda 626" +28.1 4 141.0 80.00 3230. 20.4 81 2 "peugeot 505s turbo diesel" +30.7 6 145.0 76.00 3160. 19.6 81 2 "volvo diesel" +25.4 6 168.0 116.0 2900. 12.6 81 3 "toyota cressida" +24.2 6 146.0 120.0 2930. 13.8 81 3 "datsun 810 maxima" +22.4 6 231.0 110.0 3415. 15.8 81 1 "buick century" +26.6 8 350.0 105.0 3725. 19.0 81 1 "oldsmobile cutlass ls" +20.2 6 200.0 88.00 3060. 17.1 81 1 "ford granada gl" +17.6 6 225.0 85.00 3465. 16.6 81 1 "chrysler lebaron salon" +28.0 4 112.0 88.00 2605. 19.6 82 1 "chevrolet cavalier" +27.0 4 112.0 88.00 2640. 18.6 82 1 "chevrolet cavalier wagon" +34.0 4 112.0 88.00 2395. 18.0 82 1 "chevrolet cavalier 2-door" +31.0 4 112.0 85.00 2575. 16.2 82 1 "pontiac j2000 se hatchback" +29.0 4 135.0 84.00 2525. 16.0 82 1 "dodge aries se" +27.0 4 151.0 90.00 2735. 18.0 82 1 "pontiac phoenix" +24.0 4 140.0 92.00 2865. 16.4 82 1 "ford fairmont futura" +23.0 4 151.0 ? 3035. 20.5 82 1 "amc concord dl" +36.0 4 105.0 74.00 1980. 15.3 82 2 "volkswagen rabbit l" +37.0 4 91.00 68.00 2025. 18.2 82 3 "mazda glc custom l" +31.0 4 91.00 68.00 1970. 17.6 82 3 "mazda glc custom" +38.0 4 105.0 63.00 2125. 14.7 82 1 "plymouth horizon miser" +36.0 4 98.00 70.00 2125. 17.3 82 1 "mercury lynx l" +36.0 4 120.0 88.00 2160. 14.5 82 3 "nissan stanza xe" +36.0 4 107.0 75.00 2205. 14.5 82 3 "honda accord" +34.0 4 108.0 70.00 2245 16.9 82 3 "toyota corolla" +38.0 4 91.00 67.00 1965. 15.0 82 3 "honda civic" +32.0 4 91.00 67.00 1965. 15.7 82 3 "honda civic (auto)" +38.0 4 91.00 67.00 1995. 16.2 82 3 "datsun 310 gx" +25.0 6 181.0 110.0 2945. 16.4 82 1 "buick century limited" +38.0 6 262.0 85.00 3015. 17.0 82 1 "oldsmobile cutlass ciera (diesel)" +26.0 4 156.0 92.00 2585. 14.5 82 1 "chrysler lebaron medallion" +22.0 6 232.0 112.0 2835 14.7 82 1 "ford granada l" +32.0 4 144.0 96.00 2665. 13.9 82 3 "toyota celica gt" +36.0 4 135.0 84.00 2370. 13.0 82 1 "dodge charger 2.2" +27.0 4 151.0 90.00 2950. 17.3 82 1 "chevrolet camaro" +27.0 4 140.0 86.00 2790. 15.6 82 1 "ford mustang gl" +44.0 4 97.00 52.00 2130. 24.6 82 2 "vw pickup" +32.0 4 135.0 84.00 2295. 11.6 82 1 "dodge rampage" +28.0 4 120.0 79.00 2625. 18.6 82 1 "ford ranger" +31.0 4 119.0 82.00 2720. 
19.4 82 1 "chevy s-10" From a2d4b9a0feeb8b6729bc02fc8d695fffa61b2e55 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 3 Apr 2020 00:18:23 -0400 Subject: [PATCH 176/329] add file --- tf2.0/daily-minimum-temperatures-in-me.csv | 3654 ++++++++++++++++++++ 1 file changed, 3654 insertions(+) create mode 100644 tf2.0/daily-minimum-temperatures-in-me.csv diff --git a/tf2.0/daily-minimum-temperatures-in-me.csv b/tf2.0/daily-minimum-temperatures-in-me.csv new file mode 100644 index 00000000..be45e5a8 --- /dev/null +++ b/tf2.0/daily-minimum-temperatures-in-me.csv @@ -0,0 +1,3654 @@ +"Date","Daily minimum temperatures in Melbourne, Australia, 1981-1990" +"1981-01-01",20.7 +"1981-01-02",17.9 +"1981-01-03",18.8 +"1981-01-04",14.6 +"1981-01-05",15.8 +"1981-01-06",15.8 +"1981-01-07",15.8 +"1981-01-08",17.4 +"1981-01-09",21.8 +"1981-01-10",20.0 +"1981-01-11",16.2 +"1981-01-12",13.3 +"1981-01-13",16.7 +"1981-01-14",21.5 +"1981-01-15",25.0 +"1981-01-16",20.7 +"1981-01-17",20.6 +"1981-01-18",24.8 +"1981-01-19",17.7 +"1981-01-20",15.5 +"1981-01-21",18.2 +"1981-01-22",12.1 +"1981-01-23",14.4 +"1981-01-24",16.0 +"1981-01-25",16.5 +"1981-01-26",18.7 +"1981-01-27",19.4 +"1981-01-28",17.2 +"1981-01-29",15.5 +"1981-01-30",15.1 +"1981-01-31",15.4 +"1981-02-01",15.3 +"1981-02-02",18.8 +"1981-02-03",21.9 +"1981-02-04",19.9 +"1981-02-05",16.6 +"1981-02-06",16.8 +"1981-02-07",14.6 +"1981-02-08",17.1 +"1981-02-09",25.0 +"1981-02-10",15.0 +"1981-02-11",13.7 +"1981-02-12",13.9 +"1981-02-13",18.3 +"1981-02-14",22.0 +"1981-02-15",22.1 +"1981-02-16",21.2 +"1981-02-17",18.4 +"1981-02-18",16.6 +"1981-02-19",16.1 +"1981-02-20",15.7 +"1981-02-21",16.6 +"1981-02-22",16.5 +"1981-02-23",14.4 +"1981-02-24",14.4 +"1981-02-25",18.5 +"1981-02-26",16.9 +"1981-02-27",17.5 +"1981-02-28",21.2 +"1981-03-01",17.8 +"1981-03-02",18.6 +"1981-03-03",17.0 +"1981-03-04",16.0 +"1981-03-05",13.3 +"1981-03-06",14.3 +"1981-03-07",11.4 +"1981-03-08",16.3 +"1981-03-09",16.1 +"1981-03-10",11.8 +"1981-03-11",12.2 +"1981-03-12",14.7 +"1981-03-13",11.8 +"1981-03-14",11.3 +"1981-03-15",10.6 +"1981-03-16",11.7 +"1981-03-17",14.2 +"1981-03-18",11.2 +"1981-03-19",16.9 +"1981-03-20",16.7 +"1981-03-21",8.1 +"1981-03-22",8.0 +"1981-03-23",8.8 +"1981-03-24",13.4 +"1981-03-25",10.9 +"1981-03-26",13.4 +"1981-03-27",11.0 +"1981-03-28",15.0 +"1981-03-29",15.7 +"1981-03-30",14.5 +"1981-03-31",15.8 +"1981-04-01",16.7 +"1981-04-02",16.8 +"1981-04-03",17.5 +"1981-04-04",17.1 +"1981-04-05",18.1 +"1981-04-06",16.6 +"1981-04-07",10.0 +"1981-04-08",14.9 +"1981-04-09",15.9 +"1981-04-10",13.0 +"1981-04-11",7.6 +"1981-04-12",11.5 +"1981-04-13",13.5 +"1981-04-14",13.0 +"1981-04-15",13.3 +"1981-04-16",12.1 +"1981-04-17",12.4 +"1981-04-18",13.2 +"1981-04-19",13.8 +"1981-04-20",10.6 +"1981-04-21",9.0 +"1981-04-22",10.0 +"1981-04-23",9.8 +"1981-04-24",11.5 +"1981-04-25",8.9 +"1981-04-26",7.4 +"1981-04-27",9.9 +"1981-04-28",9.3 +"1981-04-29",9.9 +"1981-04-30",7.4 +"1981-05-01",8.6 +"1981-05-02",11.9 +"1981-05-03",14.0 +"1981-05-04",8.6 +"1981-05-05",10.0 +"1981-05-06",13.5 +"1981-05-07",12.0 +"1981-05-08",10.5 +"1981-05-09",10.7 +"1981-05-10",8.1 +"1981-05-11",10.1 +"1981-05-12",10.6 +"1981-05-13",5.3 +"1981-05-14",6.6 +"1981-05-15",8.5 +"1981-05-16",11.2 +"1981-05-17",9.8 +"1981-05-18",5.9 +"1981-05-19",3.2 +"1981-05-20",2.1 +"1981-05-21",3.4 +"1981-05-22",5.4 +"1981-05-23",9.6 +"1981-05-24",11.5 +"1981-05-25",12.3 +"1981-05-26",12.6 +"1981-05-27",11.0 +"1981-05-28",11.2 +"1981-05-29",11.4 +"1981-05-30",11.8 +"1981-05-31",12.8 +"1981-06-01",11.6 +"1981-06-02",10.6 +"1981-06-03",9.8 
+"1981-06-04",11.2 +"1981-06-05",5.7 +"1981-06-06",7.1 +"1981-06-07",2.5 +"1981-06-08",3.5 +"1981-06-09",4.6 +"1981-06-10",11.0 +"1981-06-11",5.7 +"1981-06-12",7.7 +"1981-06-13",10.4 +"1981-06-14",11.4 +"1981-06-15",9.2 +"1981-06-16",6.1 +"1981-06-17",2.7 +"1981-06-18",4.3 +"1981-06-19",6.3 +"1981-06-20",3.8 +"1981-06-21",4.4 +"1981-06-22",7.1 +"1981-06-23",4.8 +"1981-06-24",5.8 +"1981-06-25",6.2 +"1981-06-26",7.3 +"1981-06-27",9.2 +"1981-06-28",10.2 +"1981-06-29",9.5 +"1981-06-30",9.5 +"1981-07-01",10.7 +"1981-07-02",10.0 +"1981-07-03",6.5 +"1981-07-04",7.0 +"1981-07-05",7.4 +"1981-07-06",8.1 +"1981-07-07",6.6 +"1981-07-08",8.3 +"1981-07-09",8.9 +"1981-07-10",4.6 +"1981-07-11",6.8 +"1981-07-12",5.7 +"1981-07-13",6.1 +"1981-07-14",7.0 +"1981-07-15",7.2 +"1981-07-16",6.3 +"1981-07-17",8.8 +"1981-07-18",5.0 +"1981-07-19",7.4 +"1981-07-20",10.1 +"1981-07-21",12.0 +"1981-07-22",9.0 +"1981-07-23",8.9 +"1981-07-24",9.8 +"1981-07-25",9.0 +"1981-07-26",9.2 +"1981-07-27",7.7 +"1981-07-28",8.0 +"1981-07-29",6.1 +"1981-07-30",3.5 +"1981-07-31",3.2 +"1981-08-01",5.7 +"1981-08-02",7.7 +"1981-08-03",9.0 +"1981-08-04",10.0 +"1981-08-05",6.2 +"1981-08-06",6.9 +"1981-08-07",6.5 +"1981-08-08",6.8 +"1981-08-09",7.0 +"1981-08-10",5.2 +"1981-08-11",3.0 +"1981-08-12",5.6 +"1981-08-13",7.9 +"1981-08-14",9.0 +"1981-08-15",8.6 +"1981-08-16",10.3 +"1981-08-17",10.5 +"1981-08-18",7.6 +"1981-08-19",9.7 +"1981-08-20",12.5 +"1981-08-21",7.4 +"1981-08-22",7.9 +"1981-08-23",3.9 +"1981-08-24",6.6 +"1981-08-25",4.6 +"1981-08-26",7.0 +"1981-08-27",6.0 +"1981-08-28",5.5 +"1981-08-29",8.1 +"1981-08-30",5.5 +"1981-08-31",6.2 +"1981-09-01",8.0 +"1981-09-02",10.3 +"1981-09-03",9.8 +"1981-09-04",9.6 +"1981-09-05",8.5 +"1981-09-06",7.5 +"1981-09-07",11.2 +"1981-09-08",14.6 +"1981-09-09",11.7 +"1981-09-10",7.8 +"1981-09-11",12.3 +"1981-09-12",10.1 +"1981-09-13",11.5 +"1981-09-14",7.3 +"1981-09-15",10.9 +"1981-09-16",14.1 +"1981-09-17",10.7 +"1981-09-18",16.9 +"1981-09-19",10.5 +"1981-09-20",6.5 +"1981-09-21",11.0 +"1981-09-22",6.3 +"1981-09-23",10.5 +"1981-09-24",7.2 +"1981-09-25",7.6 +"1981-09-26",10.7 +"1981-09-27",7.8 +"1981-09-28",9.6 +"1981-09-29",11.4 +"1981-09-30",12.4 +"1981-10-01",8.9 +"1981-10-02",13.2 +"1981-10-03",8.6 +"1981-10-04",6.2 +"1981-10-05",11.4 +"1981-10-06",13.2 +"1981-10-07",14.3 +"1981-10-08",7.3 +"1981-10-09",12.9 +"1981-10-10",7.8 +"1981-10-11",6.2 +"1981-10-12",5.6 +"1981-10-13",10.0 +"1981-10-14",13.3 +"1981-10-15",8.3 +"1981-10-16",10.2 +"1981-10-17",8.6 +"1981-10-18",7.3 +"1981-10-19",10.4 +"1981-10-20",11.2 +"1981-10-21",13.2 +"1981-10-22",11.4 +"1981-10-23",9.1 +"1981-10-24",6.6 +"1981-10-25",8.4 +"1981-10-26",9.7 +"1981-10-27",13.2 +"1981-10-28",12.5 +"1981-10-29",11.0 +"1981-10-30",11.0 +"1981-10-31",11.7 +"1981-11-01",9.2 +"1981-11-02",11.5 +"1981-11-03",13.6 +"1981-11-04",13.7 +"1981-11-05",10.4 +"1981-11-06",11.5 +"1981-11-07",7.6 +"1981-11-08",9.6 +"1981-11-09",14.2 +"1981-11-10",15.7 +"1981-11-11",10.5 +"1981-11-12",10.5 +"1981-11-13",9.7 +"1981-11-14",9.5 +"1981-11-15",11.3 +"1981-11-16",8.9 +"1981-11-17",9.4 +"1981-11-18",11.9 +"1981-11-19",11.7 +"1981-11-20",13.4 +"1981-11-21",12.6 +"1981-11-22",10.1 +"1981-11-23",15.8 +"1981-11-24",13.6 +"1981-11-25",11.9 +"1981-11-26",9.9 +"1981-11-27",12.6 +"1981-11-28",17.8 +"1981-11-29",15.0 +"1981-11-30",13.6 +"1981-12-01",13.4 +"1981-12-02",10.5 +"1981-12-03",14.2 +"1981-12-04",11.5 +"1981-12-05",13.0 +"1981-12-06",15.0 +"1981-12-07",14.7 +"1981-12-08",12.6 +"1981-12-09",12.5 +"1981-12-10",13.5 +"1981-12-11",14.8 +"1981-12-12",17.2 +"1981-12-13",9.7 
+"1981-12-14",12.1 +"1981-12-15",12.8 +"1981-12-16",11.2 +"1981-12-17",16.4 +"1981-12-18",15.6 +"1981-12-19",13.3 +"1981-12-20",11.0 +"1981-12-21",11.1 +"1981-12-22",15.0 +"1981-12-23",12.8 +"1981-12-24",15.0 +"1981-12-25",14.2 +"1981-12-26",14.0 +"1981-12-27",15.5 +"1981-12-28",13.3 +"1981-12-29",15.6 +"1981-12-30",15.2 +"1981-12-31",17.4 +"1982-01-01",17.0 +"1982-01-02",15.0 +"1982-01-03",13.5 +"1982-01-04",15.2 +"1982-01-05",13.0 +"1982-01-06",12.5 +"1982-01-07",14.1 +"1982-01-08",14.8 +"1982-01-09",16.2 +"1982-01-10",15.8 +"1982-01-11",19.1 +"1982-01-12",22.2 +"1982-01-13",15.9 +"1982-01-14",13.0 +"1982-01-15",14.1 +"1982-01-16",15.8 +"1982-01-17",24.0 +"1982-01-18",18.0 +"1982-01-19",19.7 +"1982-01-20",25.2 +"1982-01-21",20.5 +"1982-01-22",19.3 +"1982-01-23",15.8 +"1982-01-24",17.0 +"1982-01-25",18.4 +"1982-01-26",13.3 +"1982-01-27",14.6 +"1982-01-28",12.5 +"1982-01-29",17.0 +"1982-01-30",17.1 +"1982-01-31",14.0 +"1982-02-01",14.6 +"1982-02-02",13.3 +"1982-02-03",14.8 +"1982-02-04",15.1 +"1982-02-05",13.1 +"1982-02-06",13.6 +"1982-02-07",19.5 +"1982-02-08",22.7 +"1982-02-09",17.2 +"1982-02-10",13.5 +"1982-02-11",15.4 +"1982-02-12",17.0 +"1982-02-13",19.2 +"1982-02-14",22.8 +"1982-02-15",26.3 +"1982-02-16",18.2 +"1982-02-17",17.0 +"1982-02-18",14.8 +"1982-02-19",12.8 +"1982-02-20",15.5 +"1982-02-21",15.6 +"1982-02-22",13.1 +"1982-02-23",15.2 +"1982-02-24",14.1 +"1982-02-25",12.5 +"1982-02-26",14.6 +"1982-02-27",10.4 +"1982-02-28",13.9 +"1982-03-01",11.9 +"1982-03-02",13.5 +"1982-03-03",9.8 +"1982-03-04",14.0 +"1982-03-05",21.5 +"1982-03-06",19.5 +"1982-03-07",16.7 +"1982-03-08",19.1 +"1982-03-09",11.0 +"1982-03-10",9.0 +"1982-03-11",10.0 +"1982-03-12",14.6 +"1982-03-13",12.5 +"1982-03-14",17.2 +"1982-03-15",19.2 +"1982-03-16",22.2 +"1982-03-17",15.7 +"1982-03-18",14.2 +"1982-03-19",9.8 +"1982-03-20",14.0 +"1982-03-21",17.5 +"1982-03-22",20.7 +"1982-03-23",15.6 +"1982-03-24",13.2 +"1982-03-25",14.5 +"1982-03-26",16.8 +"1982-03-27",17.2 +"1982-03-28",13.4 +"1982-03-29",14.2 +"1982-03-30",14.3 +"1982-03-31",10.2 +"1982-04-01",10.4 +"1982-04-02",12.3 +"1982-04-03",11.9 +"1982-04-04",11.2 +"1982-04-05",8.5 +"1982-04-06",12.0 +"1982-04-07",12.4 +"1982-04-08",12.9 +"1982-04-09",10.1 +"1982-04-10",15.0 +"1982-04-11",13.6 +"1982-04-12",12.4 +"1982-04-13",13.6 +"1982-04-14",16.1 +"1982-04-15",19.5 +"1982-04-16",14.2 +"1982-04-17",9.3 +"1982-04-18",10.1 +"1982-04-19",7.4 +"1982-04-20",8.6 +"1982-04-21",7.8 +"1982-04-22",9.1 +"1982-04-23",13.0 +"1982-04-24",16.5 +"1982-04-25",12.9 +"1982-04-26",6.9 +"1982-04-27",6.9 +"1982-04-28",8.7 +"1982-04-29",10.0 +"1982-04-30",10.8 +"1982-05-01",7.5 +"1982-05-02",6.3 +"1982-05-03",11.9 +"1982-05-04",13.8 +"1982-05-05",11.8 +"1982-05-06",11.0 +"1982-05-07",10.1 +"1982-05-08",8.5 +"1982-05-09",5.5 +"1982-05-10",7.6 +"1982-05-11",8.7 +"1982-05-12",10.8 +"1982-05-13",11.2 +"1982-05-14",9.1 +"1982-05-15",3.7 +"1982-05-16",4.6 +"1982-05-17",6.6 +"1982-05-18",13.2 +"1982-05-19",15.2 +"1982-05-20",7.6 +"1982-05-21",8.4 +"1982-05-22",6.0 +"1982-05-23",8.3 +"1982-05-24",8.6 +"1982-05-25",11.1 +"1982-05-26",12.1 +"1982-05-27",12.9 +"1982-05-28",14.0 +"1982-05-29",12.5 +"1982-05-30",11.5 +"1982-05-31",7.0 +"1982-06-01",7.1 +"1982-06-02",9.0 +"1982-06-03",3.1 +"1982-06-04",2.5 +"1982-06-05",0.0 +"1982-06-06",1.6 +"1982-06-07",2.6 +"1982-06-08",5.7 +"1982-06-09",2.3 +"1982-06-10",4.5 +"1982-06-11",8.2 +"1982-06-12",6.9 +"1982-06-13",7.3 +"1982-06-14",6.0 +"1982-06-15",7.3 +"1982-06-16",7.6 +"1982-06-17",8.0 +"1982-06-18",8.0 +"1982-06-19",6.8 +"1982-06-20",7.3 
+"1982-06-21",6.2 +"1982-06-22",6.9 +"1982-06-23",8.9 +"1982-06-24",4.0 +"1982-06-25",1.3 +"1982-06-26",0.8 +"1982-06-27",4.3 +"1982-06-28",7.3 +"1982-06-29",7.7 +"1982-06-30",9.0 +"1982-07-01",4.2 +"1982-07-02",1.6 +"1982-07-03",2.6 +"1982-07-04",3.4 +"1982-07-05",3.9 +"1982-07-06",7.0 +"1982-07-07",7.8 +"1982-07-08",5.3 +"1982-07-09",2.4 +"1982-07-10",2.8 +"1982-07-11",4.0 +"1982-07-12",7.5 +"1982-07-13",7.8 +"1982-07-14",5.6 +"1982-07-15",3.3 +"1982-07-16",5.0 +"1982-07-17",3.7 +"1982-07-18",3.9 +"1982-07-19",5.2 +"1982-07-20",?0.2 +"1982-07-21",?0.8 +"1982-07-22",0.9 +"1982-07-23",3.5 +"1982-07-24",6.6 +"1982-07-25",9.5 +"1982-07-26",9.0 +"1982-07-27",3.5 +"1982-07-28",4.5 +"1982-07-29",5.7 +"1982-07-30",5.6 +"1982-07-31",7.1 +"1982-08-01",9.7 +"1982-08-02",8.3 +"1982-08-03",9.1 +"1982-08-04",2.8 +"1982-08-05",2.2 +"1982-08-06",4.5 +"1982-08-07",3.8 +"1982-08-08",3.8 +"1982-08-09",6.2 +"1982-08-10",11.5 +"1982-08-11",10.2 +"1982-08-12",7.9 +"1982-08-13",9.0 +"1982-08-14",9.5 +"1982-08-15",6.0 +"1982-08-16",8.2 +"1982-08-17",9.2 +"1982-08-18",4.3 +"1982-08-19",6.6 +"1982-08-20",9.4 +"1982-08-21",13.2 +"1982-08-22",6.6 +"1982-08-23",5.1 +"1982-08-24",12.1 +"1982-08-25",11.2 +"1982-08-26",8.5 +"1982-08-27",4.6 +"1982-08-28",7.0 +"1982-08-29",14.2 +"1982-08-30",12.7 +"1982-08-31",7.6 +"1982-09-01",4.0 +"1982-09-02",10.0 +"1982-09-03",10.5 +"1982-09-04",5.0 +"1982-09-05",4.5 +"1982-09-06",8.2 +"1982-09-07",4.3 +"1982-09-08",9.8 +"1982-09-09",5.8 +"1982-09-10",5.0 +"1982-09-11",8.5 +"1982-09-12",9.0 +"1982-09-13",3.6 +"1982-09-14",6.7 +"1982-09-15",6.7 +"1982-09-16",10.1 +"1982-09-17",15.0 +"1982-09-18",8.9 +"1982-09-19",5.7 +"1982-09-20",4.2 +"1982-09-21",4.0 +"1982-09-22",5.3 +"1982-09-23",6.3 +"1982-09-24",8.5 +"1982-09-25",11.5 +"1982-09-26",7.7 +"1982-09-27",9.2 +"1982-09-28",7.8 +"1982-09-29",6.3 +"1982-09-30",6.3 +"1982-10-01",8.6 +"1982-10-02",6.1 +"1982-10-03",13.2 +"1982-10-04",9.9 +"1982-10-05",4.7 +"1982-10-06",5.8 +"1982-10-07",14.9 +"1982-10-08",10.7 +"1982-10-09",8.6 +"1982-10-10",9.4 +"1982-10-11",5.7 +"1982-10-12",10.9 +"1982-10-13",13.1 +"1982-10-14",10.4 +"1982-10-15",8.2 +"1982-10-16",9.8 +"1982-10-17",7.5 +"1982-10-18",5.8 +"1982-10-19",9.8 +"1982-10-20",7.9 +"1982-10-21",8.7 +"1982-10-22",10.0 +"1982-10-23",10.6 +"1982-10-24",8.0 +"1982-10-25",10.2 +"1982-10-26",15.1 +"1982-10-27",13.9 +"1982-10-28",9.2 +"1982-10-29",9.0 +"1982-10-30",13.2 +"1982-10-31",7.0 +"1982-11-01",10.6 +"1982-11-02",6.9 +"1982-11-03",9.5 +"1982-11-04",12.5 +"1982-11-05",13.6 +"1982-11-06",17.7 +"1982-11-07",16.0 +"1982-11-08",11.3 +"1982-11-09",10.5 +"1982-11-10",14.4 +"1982-11-11",10.3 +"1982-11-12",9.0 +"1982-11-13",11.1 +"1982-11-14",14.5 +"1982-11-15",18.0 +"1982-11-16",12.8 +"1982-11-17",10.7 +"1982-11-18",9.1 +"1982-11-19",8.7 +"1982-11-20",12.4 +"1982-11-21",12.6 +"1982-11-22",10.3 +"1982-11-23",13.7 +"1982-11-24",16.0 +"1982-11-25",15.8 +"1982-11-26",12.1 +"1982-11-27",12.5 +"1982-11-28",12.2 +"1982-11-29",13.7 +"1982-11-30",16.1 +"1982-12-01",15.5 +"1982-12-02",10.3 +"1982-12-03",10.5 +"1982-12-04",11.0 +"1982-12-05",11.9 +"1982-12-06",13.0 +"1982-12-07",12.2 +"1982-12-08",10.6 +"1982-12-09",13.0 +"1982-12-10",13.0 +"1982-12-11",12.2 +"1982-12-12",12.6 +"1982-12-13",18.7 +"1982-12-14",15.2 +"1982-12-15",15.3 +"1982-12-16",13.9 +"1982-12-17",15.8 +"1982-12-18",13.0 +"1982-12-19",13.0 +"1982-12-20",13.7 +"1982-12-21",12.0 +"1982-12-22",10.8 +"1982-12-23",15.6 +"1982-12-24",15.3 +"1982-12-25",13.9 +"1982-12-26",13.0 +"1982-12-27",15.3 +"1982-12-28",16.3 +"1982-12-29",15.8 +"1982-12-30",17.7 
+"1982-12-31",16.3 +"1983-01-01",18.4 +"1983-01-02",15.0 +"1983-01-03",10.9 +"1983-01-04",11.4 +"1983-01-05",14.8 +"1983-01-06",12.1 +"1983-01-07",12.8 +"1983-01-08",16.2 +"1983-01-09",15.5 +"1983-01-10",13.0 +"1983-01-11",10.5 +"1983-01-12",9.1 +"1983-01-13",10.5 +"1983-01-14",11.8 +"1983-01-15",12.7 +"1983-01-16",12.7 +"1983-01-17",11.5 +"1983-01-18",13.8 +"1983-01-19",13.3 +"1983-01-20",11.6 +"1983-01-21",15.4 +"1983-01-22",12.4 +"1983-01-23",16.9 +"1983-01-24",14.7 +"1983-01-25",10.6 +"1983-01-26",15.6 +"1983-01-27",10.7 +"1983-01-28",12.6 +"1983-01-29",13.8 +"1983-01-30",14.3 +"1983-01-31",14.0 +"1983-02-01",18.1 +"1983-02-02",17.3 +"1983-02-03",13.0 +"1983-02-04",16.0 +"1983-02-05",14.9 +"1983-02-06",16.2 +"1983-02-07",20.3 +"1983-02-08",22.5 +"1983-02-09",17.2 +"1983-02-10",15.9 +"1983-02-11",16.8 +"1983-02-12",13.8 +"1983-02-13",12.8 +"1983-02-14",14.0 +"1983-02-15",17.5 +"1983-02-16",21.5 +"1983-02-17",16.8 +"1983-02-18",13.6 +"1983-02-19",14.5 +"1983-02-20",14.2 +"1983-02-21",15.7 +"1983-02-22",19.7 +"1983-02-23",17.4 +"1983-02-24",14.4 +"1983-02-25",16.9 +"1983-02-26",19.1 +"1983-02-27",20.4 +"1983-02-28",20.1 +"1983-03-01",19.9 +"1983-03-02",22.0 +"1983-03-03",20.5 +"1983-03-04",22.1 +"1983-03-05",20.6 +"1983-03-06",15.0 +"1983-03-07",20.6 +"1983-03-08",21.5 +"1983-03-09",16.2 +"1983-03-10",14.1 +"1983-03-11",14.5 +"1983-03-12",21.1 +"1983-03-13",15.9 +"1983-03-14",15.2 +"1983-03-15",13.1 +"1983-03-16",13.2 +"1983-03-17",12.5 +"1983-03-18",15.2 +"1983-03-19",17.6 +"1983-03-20",15.5 +"1983-03-21",16.7 +"1983-03-22",16.3 +"1983-03-23",15.1 +"1983-03-24",12.7 +"1983-03-25",10.0 +"1983-03-26",11.4 +"1983-03-27",12.6 +"1983-03-28",10.7 +"1983-03-29",10.0 +"1983-03-30",13.9 +"1983-03-31",13.4 +"1983-04-01",12.5 +"1983-04-02",12.8 +"1983-04-03",7.8 +"1983-04-04",11.1 +"1983-04-05",10.7 +"1983-04-06",7.1 +"1983-04-07",6.7 +"1983-04-08",5.7 +"1983-04-09",9.1 +"1983-04-10",15.2 +"1983-04-11",15.5 +"1983-04-12",11.1 +"1983-04-13",11.7 +"1983-04-14",11.5 +"1983-04-15",9.8 +"1983-04-16",6.2 +"1983-04-17",6.7 +"1983-04-18",7.5 +"1983-04-19",8.8 +"1983-04-20",8.0 +"1983-04-21",10.4 +"1983-04-22",14.5 +"1983-04-23",16.5 +"1983-04-24",14.1 +"1983-04-25",10.5 +"1983-04-26",12.6 +"1983-04-27",13.0 +"1983-04-28",8.7 +"1983-04-29",10.1 +"1983-04-30",12.0 +"1983-05-01",12.5 +"1983-05-02",13.5 +"1983-05-03",13.7 +"1983-05-04",13.5 +"1983-05-05",10.7 +"1983-05-06",13.0 +"1983-05-07",11.6 +"1983-05-08",13.0 +"1983-05-09",11.2 +"1983-05-10",13.5 +"1983-05-11",12.9 +"1983-05-12",6.8 +"1983-05-13",10.0 +"1983-05-14",14.5 +"1983-05-15",11.7 +"1983-05-16",6.7 +"1983-05-17",4.6 +"1983-05-18",4.9 +"1983-05-19",7.4 +"1983-05-20",8.3 +"1983-05-21",7.5 +"1983-05-22",6.2 +"1983-05-23",7.8 +"1983-05-24",13.2 +"1983-05-25",11.9 +"1983-05-26",6.5 +"1983-05-27",8.3 +"1983-05-28",12.1 +"1983-05-29",9.3 +"1983-05-30",7.5 +"1983-05-31",9.3 +"1983-06-01",11.0 +"1983-06-02",10.8 +"1983-06-03",5.3 +"1983-06-04",7.6 +"1983-06-05",5.6 +"1983-06-06",7.2 +"1983-06-07",9.6 +"1983-06-08",7.0 +"1983-06-09",8.3 +"1983-06-10",7.8 +"1983-06-11",4.7 +"1983-06-12",6.8 +"1983-06-13",7.2 +"1983-06-14",8.3 +"1983-06-15",9.5 +"1983-06-16",4.7 +"1983-06-17",3.0 +"1983-06-18",1.5 +"1983-06-19",2.5 +"1983-06-20",6.2 +"1983-06-21",11.6 +"1983-06-22",6.6 +"1983-06-23",6.6 +"1983-06-24",8.0 +"1983-06-25",7.9 +"1983-06-26",3.3 +"1983-06-27",3.9 +"1983-06-28",6.0 +"1983-06-29",4.0 +"1983-06-30",5.5 +"1983-07-01",8.5 +"1983-07-02",9.8 +"1983-07-03",9.5 +"1983-07-04",7.2 +"1983-07-05",8.1 +"1983-07-06",8.0 +"1983-07-07",8.5 +"1983-07-08",8.8 
+"1983-07-09",8.3 +"1983-07-10",2.4 +"1983-07-11",4.9 +"1983-07-12",5.9 +"1983-07-13",6.7 +"1983-07-14",8.4 +"1983-07-15",6.5 +"1983-07-16",7.9 +"1983-07-17",4.1 +"1983-07-18",5.4 +"1983-07-19",7.5 +"1983-07-20",3.9 +"1983-07-21",2.5 +"1983-07-22",5.3 +"1983-07-23",6.6 +"1983-07-24",0.0 +"1983-07-25",0.7 +"1983-07-26",7.6 +"1983-07-27",12.3 +"1983-07-28",9.2 +"1983-07-29",9.6 +"1983-07-30",9.5 +"1983-07-31",10.0 +"1983-08-01",7.7 +"1983-08-02",8.0 +"1983-08-03",8.3 +"1983-08-04",8.3 +"1983-08-05",4.5 +"1983-08-06",6.5 +"1983-08-07",9.4 +"1983-08-08",9.4 +"1983-08-09",10.5 +"1983-08-10",10.7 +"1983-08-11",9.9 +"1983-08-12",7.6 +"1983-08-13",5.8 +"1983-08-14",8.5 +"1983-08-15",13.8 +"1983-08-16",14.3 +"1983-08-17",8.3 +"1983-08-18",5.3 +"1983-08-19",3.0 +"1983-08-20",5.2 +"1983-08-21",10.3 +"1983-08-22",11.1 +"1983-08-23",10.5 +"1983-08-24",9.0 +"1983-08-25",13.0 +"1983-08-26",6.4 +"1983-08-27",8.4 +"1983-08-28",6.7 +"1983-08-29",8.3 +"1983-08-30",11.2 +"1983-08-31",10.0 +"1983-09-01",10.1 +"1983-09-02",10.6 +"1983-09-03",10.9 +"1983-09-04",5.7 +"1983-09-05",9.5 +"1983-09-06",10.4 +"1983-09-07",11.1 +"1983-09-08",12.2 +"1983-09-09",10.6 +"1983-09-10",8.8 +"1983-09-11",9.2 +"1983-09-12",5.5 +"1983-09-13",7.1 +"1983-09-14",6.5 +"1983-09-15",4.3 +"1983-09-16",5.0 +"1983-09-17",11.2 +"1983-09-18",7.5 +"1983-09-19",12.0 +"1983-09-20",13.6 +"1983-09-21",8.3 +"1983-09-22",8.5 +"1983-09-23",12.9 +"1983-09-24",7.7 +"1983-09-25",7.6 +"1983-09-26",3.5 +"1983-09-27",10.4 +"1983-09-28",15.4 +"1983-09-29",10.6 +"1983-09-30",9.6 +"1983-10-01",9.3 +"1983-10-02",13.9 +"1983-10-03",7.7 +"1983-10-04",9.5 +"1983-10-05",7.6 +"1983-10-06",6.9 +"1983-10-07",6.8 +"1983-10-08",5.8 +"1983-10-09",6.0 +"1983-10-10",8.3 +"1983-10-11",9.1 +"1983-10-12",12.5 +"1983-10-13",13.2 +"1983-10-14",16.2 +"1983-10-15",12.5 +"1983-10-16",11.8 +"1983-10-17",10.6 +"1983-10-18",10.0 +"1983-10-19",12.2 +"1983-10-20",8.9 +"1983-10-21",10.3 +"1983-10-22",7.5 +"1983-10-23",11.6 +"1983-10-24",12.6 +"1983-10-25",12.9 +"1983-10-26",11.7 +"1983-10-27",14.0 +"1983-10-28",12.3 +"1983-10-29",9.0 +"1983-10-30",9.2 +"1983-10-31",9.8 +"1983-11-01",11.8 +"1983-11-02",10.6 +"1983-11-03",12.6 +"1983-11-04",11.0 +"1983-11-05",8.2 +"1983-11-06",7.5 +"1983-11-07",13.6 +"1983-11-08",14.8 +"1983-11-09",10.9 +"1983-11-10",7.7 +"1983-11-11",10.2 +"1983-11-12",10.8 +"1983-11-13",10.8 +"1983-11-14",12.5 +"1983-11-15",13.2 +"1983-11-16",8.7 +"1983-11-17",5.7 +"1983-11-18",9.8 +"1983-11-19",7.3 +"1983-11-20",10.8 +"1983-11-21",10.0 +"1983-11-22",16.2 +"1983-11-23",15.0 +"1983-11-24",14.5 +"1983-11-25",15.9 +"1983-11-26",14.9 +"1983-11-27",14.2 +"1983-11-28",15.8 +"1983-11-29",17.2 +"1983-11-30",17.6 +"1983-12-01",12.1 +"1983-12-02",11.4 +"1983-12-03",13.0 +"1983-12-04",13.2 +"1983-12-05",12.0 +"1983-12-06",15.3 +"1983-12-07",12.7 +"1983-12-08",12.1 +"1983-12-09",13.8 +"1983-12-10",10.9 +"1983-12-11",12.0 +"1983-12-12",16.5 +"1983-12-13",15.0 +"1983-12-14",11.2 +"1983-12-15",13.9 +"1983-12-16",15.0 +"1983-12-17",14.8 +"1983-12-18",15.0 +"1983-12-19",13.3 +"1983-12-20",20.4 +"1983-12-21",18.0 +"1983-12-22",12.2 +"1983-12-23",16.7 +"1983-12-24",13.8 +"1983-12-25",17.5 +"1983-12-26",15.0 +"1983-12-27",13.9 +"1983-12-28",11.1 +"1983-12-29",16.1 +"1983-12-30",20.4 +"1983-12-31",18.0 +"1984-01-01",19.5 +"1984-01-02",17.1 +"1984-01-03",17.1 +"1984-01-04",12.0 +"1984-01-05",11.0 +"1984-01-06",16.3 +"1984-01-07",16.1 +"1984-01-08",13.0 +"1984-01-09",13.4 +"1984-01-10",15.2 +"1984-01-11",12.5 +"1984-01-12",14.3 +"1984-01-13",16.5 +"1984-01-14",18.6 +"1984-01-15",18.0 
+"1984-01-16",18.2 +"1984-01-17",11.4 +"1984-01-18",11.9 +"1984-01-19",12.2 +"1984-01-20",14.8 +"1984-01-21",13.1 +"1984-01-22",12.7 +"1984-01-23",10.5 +"1984-01-24",13.8 +"1984-01-25",18.8 +"1984-01-26",13.9 +"1984-01-27",11.2 +"1984-01-28",10.6 +"1984-01-29",14.7 +"1984-01-30",13.1 +"1984-01-31",12.1 +"1984-02-01",14.7 +"1984-02-02",11.1 +"1984-02-03",13.0 +"1984-02-04",15.6 +"1984-02-05",14.2 +"1984-02-06",15.5 +"1984-02-07",18.0 +"1984-02-08",15.0 +"1984-02-09",15.9 +"1984-02-10",15.5 +"1984-02-11",15.8 +"1984-02-12",16.6 +"1984-02-13",13.6 +"1984-02-14",13.8 +"1984-02-15",14.6 +"1984-02-16",15.6 +"1984-02-17",16.6 +"1984-02-18",14.3 +"1984-02-19",16.3 +"1984-02-20",18.9 +"1984-02-21",18.7 +"1984-02-22",14.5 +"1984-02-23",16.5 +"1984-02-24",14.1 +"1984-02-25",13.5 +"1984-02-26",11.7 +"1984-02-27",15.1 +"1984-02-28",11.2 +"1984-02-29",13.5 +"1984-03-01",12.6 +"1984-03-02",8.8 +"1984-03-03",10.5 +"1984-03-04",12.1 +"1984-03-05",14.5 +"1984-03-06",19.5 +"1984-03-07",14.0 +"1984-03-08",13.8 +"1984-03-09",10.5 +"1984-03-10",13.8 +"1984-03-11",11.4 +"1984-03-12",15.6 +"1984-03-13",11.1 +"1984-03-14",12.1 +"1984-03-15",14.2 +"1984-03-16",10.9 +"1984-03-17",14.2 +"1984-03-18",13.8 +"1984-03-19",15.1 +"1984-03-20",14.0 +"1984-03-21",12.1 +"1984-03-22",13.8 +"1984-03-23",16.6 +"1984-03-24",17.8 +"1984-03-25",9.4 +"1984-03-26",10.2 +"1984-03-27",7.4 +"1984-03-28",8.7 +"1984-03-29",14.0 +"1984-03-30",15.3 +"1984-03-31",11.1 +"1984-04-01",9.7 +"1984-04-02",10.3 +"1984-04-03",9.2 +"1984-04-04",8.2 +"1984-04-05",9.7 +"1984-04-06",12.4 +"1984-04-07",12.5 +"1984-04-08",9.0 +"1984-04-09",9.7 +"1984-04-10",10.1 +"1984-04-11",11.2 +"1984-04-12",12.0 +"1984-04-13",11.1 +"1984-04-14",10.8 +"1984-04-15",12.8 +"1984-04-16",9.8 +"1984-04-17",13.7 +"1984-04-18",11.0 +"1984-04-19",13.2 +"1984-04-20",13.0 +"1984-04-21",10.2 +"1984-04-22",13.2 +"1984-04-23",9.3 +"1984-04-24",11.1 +"1984-04-25",10.3 +"1984-04-26",8.7 +"1984-04-27",11.7 +"1984-04-28",12.5 +"1984-04-29",6.5 +"1984-04-30",9.6 +"1984-05-01",13.8 +"1984-05-02",14.7 +"1984-05-03",9.1 +"1984-05-04",4.8 +"1984-05-05",3.3 +"1984-05-06",3.5 +"1984-05-07",5.7 +"1984-05-08",5.5 +"1984-05-09",7.0 +"1984-05-10",9.5 +"1984-05-11",9.9 +"1984-05-12",4.9 +"1984-05-13",6.3 +"1984-05-14",4.8 +"1984-05-15",6.2 +"1984-05-16",7.1 +"1984-05-17",7.5 +"1984-05-18",9.4 +"1984-05-19",8.7 +"1984-05-20",9.5 +"1984-05-21",12.1 +"1984-05-22",9.5 +"1984-05-23",9.3 +"1984-05-24",8.5 +"1984-05-25",8.0 +"1984-05-26",9.8 +"1984-05-27",6.2 +"1984-05-28",7.3 +"1984-05-29",10.9 +"1984-05-30",10.0 +"1984-05-31",8.7 +"1984-06-01",9.0 +"1984-06-02",10.8 +"1984-06-03",12.4 +"1984-06-04",7.2 +"1984-06-05",7.2 +"1984-06-06",11.1 +"1984-06-07",9.3 +"1984-06-08",10.1 +"1984-06-09",3.9 +"1984-06-10",5.0 +"1984-06-11",8.2 +"1984-06-12",2.8 +"1984-06-13",4.3 +"1984-06-14",8.1 +"1984-06-15",11.1 +"1984-06-16",4.7 +"1984-06-17",5.3 +"1984-06-18",10.0 +"1984-06-19",5.6 +"1984-06-20",2.2 +"1984-06-21",7.1 +"1984-06-22",8.3 +"1984-06-23",8.6 +"1984-06-24",10.1 +"1984-06-25",8.3 +"1984-06-26",7.2 +"1984-06-27",7.7 +"1984-06-28",7.8 +"1984-06-29",9.1 +"1984-06-30",9.4 +"1984-07-01",7.8 +"1984-07-02",2.6 +"1984-07-03",2.4 +"1984-07-04",3.9 +"1984-07-05",1.3 +"1984-07-06",2.1 +"1984-07-07",7.4 +"1984-07-08",7.2 +"1984-07-09",8.8 +"1984-07-10",8.9 +"1984-07-11",8.8 +"1984-07-12",8.0 +"1984-07-13",0.7 +"1984-07-14",?0.1 +"1984-07-15",0.9 +"1984-07-16",7.8 +"1984-07-17",7.2 +"1984-07-18",8.0 +"1984-07-19",4.6 +"1984-07-20",5.2 +"1984-07-21",5.8 +"1984-07-22",6.8 +"1984-07-23",8.1 +"1984-07-24",7.5 
+"1984-07-25",5.4 +"1984-07-26",4.6 +"1984-07-27",6.4 +"1984-07-28",9.7 +"1984-07-29",7.0 +"1984-07-30",10.0 +"1984-07-31",10.6 +"1984-08-01",11.5 +"1984-08-02",10.2 +"1984-08-03",11.1 +"1984-08-04",11.0 +"1984-08-05",8.9 +"1984-08-06",9.9 +"1984-08-07",11.7 +"1984-08-08",11.6 +"1984-08-09",9.0 +"1984-08-10",6.3 +"1984-08-11",8.7 +"1984-08-12",8.5 +"1984-08-13",8.5 +"1984-08-14",8.0 +"1984-08-15",6.0 +"1984-08-16",8.0 +"1984-08-17",8.5 +"1984-08-18",7.7 +"1984-08-19",8.4 +"1984-08-20",9.0 +"1984-08-21",8.3 +"1984-08-22",6.8 +"1984-08-23",9.3 +"1984-08-24",6.7 +"1984-08-25",9.0 +"1984-08-26",7.3 +"1984-08-27",6.3 +"1984-08-28",7.9 +"1984-08-29",5.2 +"1984-08-30",9.0 +"1984-08-31",11.3 +"1984-09-01",9.2 +"1984-09-02",11.3 +"1984-09-03",7.0 +"1984-09-04",8.0 +"1984-09-05",4.6 +"1984-09-06",8.5 +"1984-09-07",9.5 +"1984-09-08",9.4 +"1984-09-09",10.5 +"1984-09-10",9.7 +"1984-09-11",4.9 +"1984-09-12",8.0 +"1984-09-13",5.8 +"1984-09-14",5.5 +"1984-09-15",10.9 +"1984-09-16",11.7 +"1984-09-17",9.2 +"1984-09-18",8.9 +"1984-09-19",11.3 +"1984-09-20",8.6 +"1984-09-21",6.2 +"1984-09-22",6.6 +"1984-09-23",9.1 +"1984-09-24",6.1 +"1984-09-25",7.5 +"1984-09-26",10.7 +"1984-09-27",6.3 +"1984-09-28",5.5 +"1984-09-29",6.7 +"1984-09-30",4.2 +"1984-10-01",11.3 +"1984-10-02",16.3 +"1984-10-03",10.5 +"1984-10-04",10.3 +"1984-10-05",7.9 +"1984-10-06",7.7 +"1984-10-07",16.0 +"1984-10-08",14.6 +"1984-10-09",12.5 +"1984-10-10",8.1 +"1984-10-11",12.2 +"1984-10-12",17.2 +"1984-10-13",9.4 +"1984-10-14",8.7 +"1984-10-15",5.9 +"1984-10-16",4.8 +"1984-10-17",7.4 +"1984-10-18",9.4 +"1984-10-19",9.7 +"1984-10-20",9.9 +"1984-10-21",6.5 +"1984-10-22",9.8 +"1984-10-23",18.2 +"1984-10-24",11.3 +"1984-10-25",9.1 +"1984-10-26",9.6 +"1984-10-27",13.5 +"1984-10-28",10.7 +"1984-10-29",10.0 +"1984-10-30",8.5 +"1984-10-31",12.6 +"1984-11-01",16.6 +"1984-11-02",11.6 +"1984-11-03",12.2 +"1984-11-04",11.2 +"1984-11-05",9.2 +"1984-11-06",9.9 +"1984-11-07",11.9 +"1984-11-08",15.6 +"1984-11-09",19.0 +"1984-11-10",12.8 +"1984-11-11",12.2 +"1984-11-12",12.0 +"1984-11-13",11.1 +"1984-11-14",11.8 +"1984-11-15",7.6 +"1984-11-16",13.0 +"1984-11-17",12.7 +"1984-11-18",16.0 +"1984-11-19",14.8 +"1984-11-20",14.2 +"1984-11-21",10.0 +"1984-11-22",8.8 +"1984-11-23",11.6 +"1984-11-24",8.6 +"1984-11-25",14.6 +"1984-11-26",24.3 +"1984-11-27",11.6 +"1984-11-28",10.8 +"1984-11-29",12.0 +"1984-11-30",11.0 +"1984-12-01",12.6 +"1984-12-02",10.8 +"1984-12-03",9.1 +"1984-12-04",11.0 +"1984-12-05",13.0 +"1984-12-06",12.8 +"1984-12-07",9.9 +"1984-12-08",11.6 +"1984-12-09",10.5 +"1984-12-10",15.9 +"1984-12-11",12.2 +"1984-12-12",13.0 +"1984-12-13",12.5 +"1984-12-14",12.5 +"1984-12-15",11.4 +"1984-12-16",12.1 +"1984-12-17",16.8 +"1984-12-18",12.1 +"1984-12-19",11.3 +"1984-12-20",10.4 +"1984-12-21",14.2 +"1984-12-22",11.4 +"1984-12-23",13.7 +"1984-12-24",16.5 +"1984-12-25",12.8 +"1984-12-26",12.2 +"1984-12-27",12.0 +"1984-12-28",12.6 +"1984-12-29",16.0 +"1984-12-30",16.4 +"1985-01-01",13.3 +"1985-01-02",15.2 +"1985-01-03",13.1 +"1985-01-04",12.7 +"1985-01-05",14.6 +"1985-01-06",11.0 +"1985-01-07",13.2 +"1985-01-08",12.2 +"1985-01-09",14.4 +"1985-01-10",13.7 +"1985-01-11",14.5 +"1985-01-12",14.1 +"1985-01-13",14.4 +"1985-01-14",19.7 +"1985-01-15",16.5 +"1985-01-16",15.9 +"1985-01-17",11.8 +"1985-01-18",12.0 +"1985-01-19",11.4 +"1985-01-20",14.4 +"1985-01-21",12.4 +"1985-01-22",15.1 +"1985-01-23",15.6 +"1985-01-24",15.2 +"1985-01-25",12.8 +"1985-01-26",13.3 +"1985-01-27",17.5 +"1985-01-28",15.4 +"1985-01-29",13.5 +"1985-01-30",16.7 +"1985-01-31",15.2 +"1985-02-01",14.9 
+"1985-02-02",10.2 +"1985-02-03",13.6 +"1985-02-04",19.0 +"1985-02-05",15.7 +"1985-02-06",18.0 +"1985-02-07",14.8 +"1985-02-08",13.9 +"1985-02-09",13.0 +"1985-02-10",15.3 +"1985-02-11",14.3 +"1985-02-12",15.6 +"1985-02-13",16.0 +"1985-02-14",14.9 +"1985-02-15",11.1 +"1985-02-16",14.8 +"1985-02-17",13.0 +"1985-02-18",12.2 +"1985-02-19",10.9 +"1985-02-20",14.6 +"1985-02-21",16.6 +"1985-02-22",18.1 +"1985-02-23",13.4 +"1985-02-24",10.3 +"1985-02-25",13.6 +"1985-02-26",13.8 +"1985-02-27",10.3 +"1985-02-28",11.0 +"1985-03-01",14.3 +"1985-03-02",15.5 +"1985-03-03",14.7 +"1985-03-04",12.7 +"1985-03-05",10.7 +"1985-03-06",12.6 +"1985-03-07",9.8 +"1985-03-08",13.2 +"1985-03-09",15.2 +"1985-03-10",16.6 +"1985-03-11",21.0 +"1985-03-12",22.4 +"1985-03-13",17.0 +"1985-03-14",21.7 +"1985-03-15",21.4 +"1985-03-16",18.6 +"1985-03-17",16.2 +"1985-03-18",16.8 +"1985-03-19",17.0 +"1985-03-20",18.4 +"1985-03-21",17.2 +"1985-03-22",18.4 +"1985-03-23",18.8 +"1985-03-24",16.5 +"1985-03-25",13.3 +"1985-03-26",12.2 +"1985-03-27",11.3 +"1985-03-28",13.8 +"1985-03-29",16.6 +"1985-03-30",14.0 +"1985-03-31",14.3 +"1985-04-01",16.4 +"1985-04-02",11.9 +"1985-04-03",15.7 +"1985-04-04",17.6 +"1985-04-05",17.5 +"1985-04-06",15.9 +"1985-04-07",16.2 +"1985-04-08",16.0 +"1985-04-09",15.9 +"1985-04-10",16.2 +"1985-04-11",16.2 +"1985-04-12",19.5 +"1985-04-13",18.2 +"1985-04-14",21.8 +"1985-04-15",15.1 +"1985-04-16",11.0 +"1985-04-17",8.1 +"1985-04-18",9.5 +"1985-04-19",9.3 +"1985-04-20",10.6 +"1985-04-21",6.3 +"1985-04-22",8.6 +"1985-04-23",6.8 +"1985-04-24",8.7 +"1985-04-25",8.4 +"1985-04-26",9.3 +"1985-04-27",10.0 +"1985-04-28",10.5 +"1985-04-29",12.0 +"1985-04-30",10.1 +"1985-05-01",9.4 +"1985-05-02",10.1 +"1985-05-03",8.0 +"1985-05-04",10.6 +"1985-05-05",13.6 +"1985-05-06",15.4 +"1985-05-07",9.0 +"1985-05-08",10.4 +"1985-05-09",11.0 +"1985-05-10",12.1 +"1985-05-11",13.4 +"1985-05-12",11.3 +"1985-05-13",6.7 +"1985-05-14",9.8 +"1985-05-15",10.8 +"1985-05-16",7.8 +"1985-05-17",4.5 +"1985-05-18",7.6 +"1985-05-19",6.9 +"1985-05-20",7.5 +"1985-05-21",8.5 +"1985-05-22",5.5 +"1985-05-23",9.5 +"1985-05-24",7.3 +"1985-05-25",5.4 +"1985-05-26",5.5 +"1985-05-27",8.1 +"1985-05-28",11.2 +"1985-05-29",13.4 +"1985-05-30",11.6 +"1985-05-31",10.1 +"1985-06-01",4.3 +"1985-06-02",5.5 +"1985-06-03",4.4 +"1985-06-04",5.9 +"1985-06-05",5.7 +"1985-06-06",8.2 +"1985-06-07",8.2 +"1985-06-08",4.2 +"1985-06-09",6.5 +"1985-06-10",10.0 +"1985-06-11",8.8 +"1985-06-12",6.6 +"1985-06-13",7.8 +"1985-06-14",10.1 +"1985-06-15",7.1 +"1985-06-16",7.7 +"1985-06-17",8.5 +"1985-06-18",7.3 +"1985-06-19",6.9 +"1985-06-20",8.4 +"1985-06-21",7.1 +"1985-06-22",6.3 +"1985-06-23",0.6 +"1985-06-24",1.6 +"1985-06-25",7.0 +"1985-06-26",8.3 +"1985-06-27",8.0 +"1985-06-28",10.2 +"1985-06-29",10.6 +"1985-06-30",10.4 +"1985-07-01",11.6 +"1985-07-02",11.0 +"1985-07-03",10.7 +"1985-07-04",7.3 +"1985-07-05",4.2 +"1985-07-06",4.7 +"1985-07-07",5.6 +"1985-07-08",7.7 +"1985-07-09",7.5 +"1985-07-10",4.9 +"1985-07-11",5.9 +"1985-07-12",7.8 +"1985-07-13",5.8 +"1985-07-14",7.0 +"1985-07-15",8.4 +"1985-07-16",6.2 +"1985-07-17",7.5 +"1985-07-18",4.8 +"1985-07-19",3.3 +"1985-07-20",3.2 +"1985-07-21",7.0 +"1985-07-22",8.4 +"1985-07-23",0.3 +"1985-07-24",0.3 +"1985-07-25",2.1 +"1985-07-26",8.5 +"1985-07-27",1.4 +"1985-07-28",4.1 +"1985-07-29",10.3 +"1985-07-30",6.6 +"1985-07-31",6.1 +"1985-08-01",7.0 +"1985-08-02",5.1 +"1985-08-03",6.3 +"1985-08-04",6.9 +"1985-08-05",11.4 +"1985-08-06",10.4 +"1985-08-07",10.3 +"1985-08-08",9.2 +"1985-08-09",7.2 +"1985-08-10",7.5 +"1985-08-11",4.0 
+"1985-08-12",5.6 +"1985-08-13",6.7 +"1985-08-14",8.4 +"1985-08-15",11.0 +"1985-08-16",8.4 +"1985-08-17",8.8 +"1985-08-18",8.6 +"1985-08-19",8.3 +"1985-08-20",4.0 +"1985-08-21",3.6 +"1985-08-22",5.7 +"1985-08-23",10.6 +"1985-08-24",6.9 +"1985-08-25",10.0 +"1985-08-26",9.8 +"1985-08-27",7.2 +"1985-08-28",10.5 +"1985-08-29",3.6 +"1985-08-30",5.3 +"1985-08-31",8.4 +"1985-09-01",10.3 +"1985-09-02",7.9 +"1985-09-03",8.5 +"1985-09-04",7.9 +"1985-09-05",8.0 +"1985-09-06",9.8 +"1985-09-07",6.7 +"1985-09-08",4.8 +"1985-09-09",9.9 +"1985-09-10",12.8 +"1985-09-11",10.9 +"1985-09-12",11.7 +"1985-09-13",11.7 +"1985-09-14",11.0 +"1985-09-15",8.2 +"1985-09-16",7.5 +"1985-09-17",5.4 +"1985-09-18",7.2 +"1985-09-19",9.7 +"1985-09-20",8.4 +"1985-09-21",9.0 +"1985-09-22",8.7 +"1985-09-23",6.6 +"1985-09-24",11.6 +"1985-09-25",13.1 +"1985-09-26",6.7 +"1985-09-27",6.5 +"1985-09-28",7.7 +"1985-09-29",8.7 +"1985-09-30",7.2 +"1985-10-01",10.5 +"1985-10-02",8.6 +"1985-10-03",7.2 +"1985-10-04",11.4 +"1985-10-05",16.2 +"1985-10-06",6.1 +"1985-10-07",9.6 +"1985-10-08",11.1 +"1985-10-09",13.6 +"1985-10-10",10.7 +"1985-10-11",14.7 +"1985-10-12",11.6 +"1985-10-13",7.3 +"1985-10-14",8.0 +"1985-10-15",9.6 +"1985-10-16",16.0 +"1985-10-17",15.1 +"1985-10-18",12.8 +"1985-10-19",6.2 +"1985-10-20",7.1 +"1985-10-21",8.4 +"1985-10-22",10.0 +"1985-10-23",12.7 +"1985-10-24",10.0 +"1985-10-25",10.2 +"1985-10-26",6.5 +"1985-10-27",9.2 +"1985-10-28",11.9 +"1985-10-29",14.7 +"1985-10-30",11.4 +"1985-10-31",6.8 +"1985-11-01",7.4 +"1985-11-02",11.2 +"1985-11-03",9.2 +"1985-11-04",12.6 +"1985-11-05",16.0 +"1985-11-06",17.1 +"1985-11-07",15.3 +"1985-11-08",13.3 +"1985-11-09",15.4 +"1985-11-10",13.2 +"1985-11-11",14.4 +"1985-11-12",14.0 +"1985-11-13",15.5 +"1985-11-14",21.0 +"1985-11-15",10.0 +"1985-11-16",9.6 +"1985-11-17",12.0 +"1985-11-18",12.2 +"1985-11-19",11.3 +"1985-11-20",13.2 +"1985-11-21",10.5 +"1985-11-22",10.1 +"1985-11-23",8.8 +"1985-11-24",13.7 +"1985-11-25",16.2 +"1985-11-26",16.0 +"1985-11-27",14.0 +"1985-11-28",13.7 +"1985-11-29",12.5 +"1985-11-30",12.8 +"1985-12-01",12.3 +"1985-12-02",15.2 +"1985-12-03",15.0 +"1985-12-04",16.4 +"1985-12-05",16.1 +"1985-12-06",14.6 +"1985-12-07",18.2 +"1985-12-08",16.4 +"1985-12-09",16.6 +"1985-12-10",14.7 +"1985-12-11",15.8 +"1985-12-12",14.1 +"1985-12-13",13.5 +"1985-12-14",13.6 +"1985-12-15",13.7 +"1985-12-16",13.6 +"1985-12-17",12.1 +"1985-12-18",12.7 +"1985-12-19",13.3 +"1985-12-20",14.2 +"1985-12-21",15.0 +"1985-12-22",13.7 +"1985-12-23",12.0 +"1985-12-24",13.1 +"1985-12-25",13.2 +"1985-12-26",13.3 +"1985-12-27",11.5 +"1985-12-28",10.8 +"1985-12-29",12.0 +"1985-12-30",16.3 +"1985-12-31",14.4 +"1986-01-01",12.9 +"1986-01-02",13.8 +"1986-01-03",10.6 +"1986-01-04",12.6 +"1986-01-05",13.7 +"1986-01-06",12.6 +"1986-01-07",13.1 +"1986-01-08",15.4 +"1986-01-09",11.9 +"1986-01-10",13.8 +"1986-01-11",14.4 +"1986-01-12",15.2 +"1986-01-13",12.5 +"1986-01-14",12.2 +"1986-01-15",16.1 +"1986-01-16",14.6 +"1986-01-17",11.6 +"1986-01-18",13.1 +"1986-01-19",12.8 +"1986-01-20",15.2 +"1986-01-21",13.8 +"1986-01-22",15.0 +"1986-01-23",13.5 +"1986-01-24",11.8 +"1986-01-25",15.3 +"1986-01-26",13.5 +"1986-01-27",15.3 +"1986-01-28",13.8 +"1986-01-29",15.8 +"1986-01-30",17.4 +"1986-01-31",15.3 +"1986-02-01",14.6 +"1986-02-02",14.8 +"1986-02-03",10.7 +"1986-02-04",11.6 +"1986-02-05",13.6 +"1986-02-06",14.4 +"1986-02-07",11.8 +"1986-02-08",15.8 +"1986-02-09",16.0 +"1986-02-10",11.8 +"1986-02-11",14.5 +"1986-02-12",10.7 +"1986-02-13",14.2 +"1986-02-14",19.5 +"1986-02-15",21.4 +"1986-02-16",17.9 +"1986-02-17",17.4 
+"1986-02-18",12.7 +"1986-02-19",13.8 +"1986-02-20",14.0 +"1986-02-21",15.0 +"1986-02-22",14.5 +"1986-02-23",13.1 +"1986-02-24",11.4 +"1986-02-25",12.5 +"1986-02-26",12.0 +"1986-02-27",13.4 +"1986-02-28",14.4 +"1986-03-01",17.7 +"1986-03-02",13.9 +"1986-03-03",13.3 +"1986-03-04",14.6 +"1986-03-05",16.4 +"1986-03-06",16.8 +"1986-03-07",20.0 +"1986-03-08",12.5 +"1986-03-09",12.7 +"1986-03-10",11.7 +"1986-03-11",12.7 +"1986-03-12",8.6 +"1986-03-13",11.9 +"1986-03-14",16.0 +"1986-03-15",15.2 +"1986-03-16",13.4 +"1986-03-17",11.6 +"1986-03-18",11.1 +"1986-03-19",15.6 +"1986-03-20",17.0 +"1986-03-21",18.5 +"1986-03-22",17.4 +"1986-03-23",16.5 +"1986-03-24",16.2 +"1986-03-25",16.1 +"1986-03-26",13.2 +"1986-03-27",18.0 +"1986-03-28",12.8 +"1986-03-29",11.7 +"1986-03-30",16.7 +"1986-03-31",15.6 +"1986-04-01",10.2 +"1986-04-02",10.3 +"1986-04-03",15.0 +"1986-04-04",18.0 +"1986-04-05",13.8 +"1986-04-06",10.5 +"1986-04-07",11.8 +"1986-04-08",7.2 +"1986-04-09",11.6 +"1986-04-10",7.4 +"1986-04-11",14.2 +"1986-04-12",12.2 +"1986-04-13",9.0 +"1986-04-14",12.3 +"1986-04-15",19.7 +"1986-04-16",12.8 +"1986-04-17",12.4 +"1986-04-18",12.0 +"1986-04-19",12.0 +"1986-04-20",11.1 +"1986-04-21",12.7 +"1986-04-22",14.2 +"1986-04-23",11.6 +"1986-04-24",12.0 +"1986-04-25",11.5 +"1986-04-26",8.3 +"1986-04-27",10.5 +"1986-04-28",9.0 +"1986-04-29",6.9 +"1986-04-30",9.4 +"1986-05-01",11.1 +"1986-05-02",9.1 +"1986-05-03",7.7 +"1986-05-04",10.0 +"1986-05-05",10.4 +"1986-05-06",8.0 +"1986-05-07",9.8 +"1986-05-08",12.4 +"1986-05-09",12.9 +"1986-05-10",12.3 +"1986-05-11",6.9 +"1986-05-12",10.5 +"1986-05-13",11.0 +"1986-05-14",9.7 +"1986-05-15",11.1 +"1986-05-16",11.5 +"1986-05-17",13.4 +"1986-05-18",10.9 +"1986-05-19",12.0 +"1986-05-20",12.1 +"1986-05-21",10.4 +"1986-05-22",10.0 +"1986-05-23",9.6 +"1986-05-24",11.3 +"1986-05-25",8.5 +"1986-05-26",6.3 +"1986-05-27",8.2 +"1986-05-28",10.7 +"1986-05-29",10.3 +"1986-05-30",9.5 +"1986-05-31",10.9 +"1986-06-01",10.9 +"1986-06-02",4.3 +"1986-06-03",5.2 +"1986-06-04",11.0 +"1986-06-05",11.6 +"1986-06-06",10.6 +"1986-06-07",9.4 +"1986-06-08",10.0 +"1986-06-09",9.6 +"1986-06-10",9.5 +"1986-06-11",9.7 +"1986-06-12",9.6 +"1986-06-13",7.0 +"1986-06-14",7.0 +"1986-06-15",6.8 +"1986-06-16",6.9 +"1986-06-17",8.0 +"1986-06-18",7.6 +"1986-06-19",8.6 +"1986-06-20",5.7 +"1986-06-21",5.5 +"1986-06-22",5.7 +"1986-06-23",5.7 +"1986-06-24",6.6 +"1986-06-25",6.0 +"1986-06-26",6.9 +"1986-06-27",7.7 +"1986-06-28",8.0 +"1986-06-29",3.9 +"1986-06-30",0.8 +"1986-07-01",2.8 +"1986-07-02",8.0 +"1986-07-03",9.8 +"1986-07-04",11.4 +"1986-07-05",8.6 +"1986-07-06",5.2 +"1986-07-07",6.6 +"1986-07-08",5.7 +"1986-07-09",4.6 +"1986-07-10",5.8 +"1986-07-11",7.0 +"1986-07-12",4.8 +"1986-07-13",4.4 +"1986-07-14",4.4 +"1986-07-15",7.9 +"1986-07-16",10.6 +"1986-07-17",5.0 +"1986-07-18",7.6 +"1986-07-19",9.2 +"1986-07-20",9.7 +"1986-07-21",8.8 +"1986-07-22",6.8 +"1986-07-23",9.4 +"1986-07-24",11.0 +"1986-07-25",2.5 +"1986-07-26",2.1 +"1986-07-27",5.4 +"1986-07-28",6.2 +"1986-07-29",7.8 +"1986-07-30",7.4 +"1986-07-31",9.3 +"1986-08-01",9.3 +"1986-08-02",9.5 +"1986-08-03",8.5 +"1986-08-04",10.0 +"1986-08-05",7.7 +"1986-08-06",9.3 +"1986-08-07",9.1 +"1986-08-08",3.5 +"1986-08-09",3.6 +"1986-08-10",2.5 +"1986-08-11",1.7 +"1986-08-12",2.7 +"1986-08-13",2.9 +"1986-08-14",5.3 +"1986-08-15",7.7 +"1986-08-16",9.1 +"1986-08-17",9.4 +"1986-08-18",7.3 +"1986-08-19",8.4 +"1986-08-20",9.2 +"1986-08-21",6.6 +"1986-08-22",9.7 +"1986-08-23",12.4 +"1986-08-24",10.2 +"1986-08-25",5.9 +"1986-08-26",7.1 +"1986-08-27",7.5 +"1986-08-28",9.7 
+"1986-08-29",12.2 +"1986-08-30",5.6 +"1986-08-31",5.4 +"1986-09-01",8.3 +"1986-09-02",10.6 +"1986-09-03",9.1 +"1986-09-04",11.3 +"1986-09-05",10.9 +"1986-09-06",8.9 +"1986-09-07",6.3 +"1986-09-08",9.0 +"1986-09-09",6.1 +"1986-09-10",9.1 +"1986-09-11",9.6 +"1986-09-12",6.0 +"1986-09-13",10.0 +"1986-09-14",11.0 +"1986-09-15",6.2 +"1986-09-16",8.3 +"1986-09-17",11.3 +"1986-09-18",11.3 +"1986-09-19",6.7 +"1986-09-20",6.6 +"1986-09-21",11.4 +"1986-09-22",6.9 +"1986-09-23",10.6 +"1986-09-24",8.6 +"1986-09-25",11.3 +"1986-09-26",12.5 +"1986-09-27",9.9 +"1986-09-28",6.9 +"1986-09-29",5.5 +"1986-09-30",7.8 +"1986-10-01",11.0 +"1986-10-02",16.2 +"1986-10-03",9.9 +"1986-10-04",8.7 +"1986-10-05",10.5 +"1986-10-06",12.2 +"1986-10-07",10.6 +"1986-10-08",8.3 +"1986-10-09",5.5 +"1986-10-10",9.0 +"1986-10-11",6.4 +"1986-10-12",7.2 +"1986-10-13",12.9 +"1986-10-14",12.0 +"1986-10-15",7.3 +"1986-10-16",9.7 +"1986-10-17",8.4 +"1986-10-18",14.7 +"1986-10-19",9.5 +"1986-10-20",7.9 +"1986-10-21",6.8 +"1986-10-22",12.6 +"1986-10-23",5.2 +"1986-10-24",7.5 +"1986-10-25",8.7 +"1986-10-26",7.6 +"1986-10-27",9.0 +"1986-10-28",7.2 +"1986-10-29",10.7 +"1986-10-30",13.1 +"1986-10-31",13.9 +"1986-11-01",10.8 +"1986-11-02",10.4 +"1986-11-03",9.1 +"1986-11-04",16.0 +"1986-11-05",21.0 +"1986-11-06",16.2 +"1986-11-07",8.6 +"1986-11-08",9.2 +"1986-11-09",12.5 +"1986-11-10",9.7 +"1986-11-11",12.5 +"1986-11-12",10.3 +"1986-11-13",12.0 +"1986-11-14",11.0 +"1986-11-15",14.8 +"1986-11-16",15.0 +"1986-11-17",15.3 +"1986-11-18",10.3 +"1986-11-19",10.7 +"1986-11-20",10.5 +"1986-11-21",8.9 +"1986-11-22",8.1 +"1986-11-23",11.5 +"1986-11-24",12.8 +"1986-11-25",9.1 +"1986-11-26",14.6 +"1986-11-27",11.6 +"1986-11-28",11.2 +"1986-11-29",12.6 +"1986-11-30",7.5 +"1986-12-01",11.0 +"1986-12-02",14.5 +"1986-12-03",18.5 +"1986-12-04",15.4 +"1986-12-05",13.1 +"1986-12-06",16.3 +"1986-12-07",20.2 +"1986-12-08",11.5 +"1986-12-09",12.4 +"1986-12-10",10.9 +"1986-12-11",12.7 +"1986-12-12",12.2 +"1986-12-13",12.4 +"1986-12-14",9.8 +"1986-12-15",8.5 +"1986-12-16",14.7 +"1986-12-17",12.0 +"1986-12-18",10.3 +"1986-12-19",11.0 +"1986-12-20",10.2 +"1986-12-21",12.6 +"1986-12-22",11.6 +"1986-12-23",9.7 +"1986-12-24",13.4 +"1986-12-25",10.5 +"1986-12-26",14.7 +"1986-12-27",14.6 +"1986-12-28",14.2 +"1986-12-29",13.2 +"1986-12-30",11.7 +"1986-12-31",17.2 +"1987-01-01",12.3 +"1987-01-02",13.8 +"1987-01-03",15.3 +"1987-01-04",15.6 +"1987-01-05",16.2 +"1987-01-06",16.3 +"1987-01-07",16.8 +"1987-01-08",11.0 +"1987-01-09",8.5 +"1987-01-10",13.2 +"1987-01-11",13.0 +"1987-01-12",12.4 +"1987-01-13",13.0 +"1987-01-14",16.6 +"1987-01-15",12.0 +"1987-01-16",12.4 +"1987-01-17",15.0 +"1987-01-18",11.8 +"1987-01-19",11.6 +"1987-01-20",12.2 +"1987-01-21",13.7 +"1987-01-22",11.2 +"1987-01-23",12.4 +"1987-01-24",11.5 +"1987-01-25",13.8 +"1987-01-26",15.7 +"1987-01-27",12.9 +"1987-01-28",11.5 +"1987-01-29",11.0 +"1987-01-30",12.7 +"1987-01-31",14.9 +"1987-02-01",16.5 +"1987-02-02",12.8 +"1987-02-03",12.7 +"1987-02-04",12.7 +"1987-02-05",11.6 +"1987-02-06",13.3 +"1987-02-07",15.2 +"1987-02-08",16.4 +"1987-02-09",11.9 +"1987-02-10",15.1 +"1987-02-11",10.6 +"1987-02-12",13.6 +"1987-02-13",12.1 +"1987-02-14",16.0 +"1987-02-15",16.8 +"1987-02-16",16.6 +"1987-02-17",15.6 +"1987-02-18",15.2 +"1987-02-19",17.7 +"1987-02-20",21.0 +"1987-02-21",13.4 +"1987-02-22",10.5 +"1987-02-23",9.5 +"1987-02-24",12.0 +"1987-02-25",10.4 +"1987-02-26",11.5 +"1987-02-27",13.2 +"1987-02-28",15.0 +"1987-03-01",14.1 +"1987-03-02",12.4 +"1987-03-03",13.4 +"1987-03-04",12.5 +"1987-03-05",14.3 
+"1987-03-06",17.6 +"1987-03-07",10.4 +"1987-03-08",9.9 +"1987-03-09",10.2 +"1987-03-10",11.3 +"1987-03-11",9.5 +"1987-03-12",11.8 +"1987-03-13",11.5 +"1987-03-14",10.5 +"1987-03-15",10.8 +"1987-03-16",13.0 +"1987-03-17",18.5 +"1987-03-18",18.7 +"1987-03-19",15.0 +"1987-03-20",13.0 +"1987-03-21",11.3 +"1987-03-22",13.0 +"1987-03-23",13.3 +"1987-03-24",11.0 +"1987-03-25",10.3 +"1987-03-26",13.0 +"1987-03-27",12.3 +"1987-03-28",15.6 +"1987-03-29",10.2 +"1987-03-30",10.8 +"1987-03-31",12.0 +"1987-04-01",13.3 +"1987-04-02",11.7 +"1987-04-03",12.5 +"1987-04-04",13.7 +"1987-04-05",14.9 +"1987-04-06",20.2 +"1987-04-07",16.3 +"1987-04-08",13.9 +"1987-04-09",10.1 +"1987-04-10",7.3 +"1987-04-11",14.0 +"1987-04-12",17.7 +"1987-04-13",16.3 +"1987-04-14",10.6 +"1987-04-15",9.7 +"1987-04-16",7.8 +"1987-04-17",10.4 +"1987-04-18",10.4 +"1987-04-19",14.1 +"1987-04-20",7.1 +"1987-04-21",8.1 +"1987-04-22",7.8 +"1987-04-23",10.6 +"1987-04-24",9.1 +"1987-04-25",9.0 +"1987-04-26",11.9 +"1987-04-27",17.1 +"1987-04-28",16.8 +"1987-04-29",13.5 +"1987-04-30",11.6 +"1987-05-01",7.0 +"1987-05-02",9.7 +"1987-05-03",9.9 +"1987-05-04",11.2 +"1987-05-05",11.3 +"1987-05-06",11.8 +"1987-05-07",9.9 +"1987-05-08",7.1 +"1987-05-09",9.6 +"1987-05-10",9.8 +"1987-05-11",10.6 +"1987-05-12",12.8 +"1987-05-13",16.5 +"1987-05-14",11.7 +"1987-05-15",12.3 +"1987-05-16",12.2 +"1987-05-17",11.8 +"1987-05-18",10.7 +"1987-05-19",10.2 +"1987-05-20",10.0 +"1987-05-21",8.3 +"1987-05-22",6.6 +"1987-05-23",9.5 +"1987-05-24",12.3 +"1987-05-25",7.6 +"1987-05-26",9.3 +"1987-05-27",5.0 +"1987-05-28",4.3 +"1987-05-29",6.4 +"1987-05-30",10.8 +"1987-05-31",7.8 +"1987-06-01",8.5 +"1987-06-02",9.7 +"1987-06-03",10.0 +"1987-06-04",11.0 +"1987-06-05",10.2 +"1987-06-06",6.6 +"1987-06-07",6.1 +"1987-06-08",5.9 +"1987-06-09",8.9 +"1987-06-10",13.0 +"1987-06-11",12.6 +"1987-06-12",5.4 +"1987-06-13",6.0 +"1987-06-14",7.8 +"1987-06-15",9.0 +"1987-06-16",4.2 +"1987-06-17",3.0 +"1987-06-18",4.5 +"1987-06-19",6.2 +"1987-06-20",11.9 +"1987-06-21",11.8 +"1987-06-22",9.4 +"1987-06-23",9.6 +"1987-06-24",9.4 +"1987-06-25",7.0 +"1987-06-26",8.9 +"1987-06-27",9.3 +"1987-06-28",6.8 +"1987-06-29",7.5 +"1987-06-30",8.0 +"1987-07-01",8.3 +"1987-07-02",2.7 +"1987-07-03",3.9 +"1987-07-04",4.1 +"1987-07-05",5.0 +"1987-07-06",5.8 +"1987-07-07",4.4 +"1987-07-08",4.1 +"1987-07-09",5.8 +"1987-07-10",9.1 +"1987-07-11",7.9 +"1987-07-12",5.0 +"1987-07-13",2.8 +"1987-07-14",4.7 +"1987-07-15",8.9 +"1987-07-16",5.4 +"1987-07-17",7.1 +"1987-07-18",9.0 +"1987-07-19",9.4 +"1987-07-20",6.3 +"1987-07-21",7.0 +"1987-07-22",6.4 +"1987-07-23",6.7 +"1987-07-24",1.5 +"1987-07-25",2.9 +"1987-07-26",4.8 +"1987-07-27",6.3 +"1987-07-28",5.7 +"1987-07-29",7.0 +"1987-07-30",8.8 +"1987-07-31",8.7 +"1987-08-01",9.0 +"1987-08-02",9.6 +"1987-08-03",8.0 +"1987-08-04",8.4 +"1987-08-05",8.1 +"1987-08-06",9.0 +"1987-08-07",5.3 +"1987-08-08",8.9 +"1987-08-09",8.7 +"1987-08-10",4.9 +"1987-08-11",7.0 +"1987-08-12",7.5 +"1987-08-13",7.0 +"1987-08-14",9.1 +"1987-08-15",11.8 +"1987-08-16",9.9 +"1987-08-17",5.6 +"1987-08-18",4.2 +"1987-08-19",4.3 +"1987-08-20",8.0 +"1987-08-21",5.1 +"1987-08-22",9.4 +"1987-08-23",9.1 +"1987-08-24",9.7 +"1987-08-25",10.6 +"1987-08-26",8.6 +"1987-08-27",10.1 +"1987-08-28",11.0 +"1987-08-29",9.7 +"1987-08-30",5.0 +"1987-08-31",6.1 +"1987-09-01",5.4 +"1987-09-02",5.8 +"1987-09-03",7.3 +"1987-09-04",6.3 +"1987-09-05",4.8 +"1987-09-06",7.6 +"1987-09-07",8.1 +"1987-09-08",9.5 +"1987-09-09",10.3 +"1987-09-10",7.0 +"1987-09-11",9.0 +"1987-09-12",10.2 +"1987-09-13",6.8 +"1987-09-14",9.3 
+"1987-09-15",9.8 +"1987-09-16",10.7 +"1987-09-17",7.8 +"1987-09-18",9.2 +"1987-09-19",15.0 +"1987-09-20",7.8 +"1987-09-21",5.3 +"1987-09-22",9.5 +"1987-09-23",7.6 +"1987-09-24",14.0 +"1987-09-25",14.9 +"1987-09-26",14.9 +"1987-09-27",19.2 +"1987-09-28",17.0 +"1987-09-29",13.0 +"1987-09-30",11.2 +"1987-10-01",9.5 +"1987-10-02",10.3 +"1987-10-03",9.3 +"1987-10-04",11.3 +"1987-10-05",6.5 +"1987-10-06",12.0 +"1987-10-07",8.3 +"1987-10-08",8.7 +"1987-10-09",8.7 +"1987-10-10",10.2 +"1987-10-11",6.9 +"1987-10-12",4.9 +"1987-10-13",10.0 +"1987-10-14",7.6 +"1987-10-15",14.5 +"1987-10-16",13.2 +"1987-10-17",9.9 +"1987-10-18",10.1 +"1987-10-19",11.3 +"1987-10-20",10.4 +"1987-10-21",10.9 +"1987-10-22",9.2 +"1987-10-23",10.5 +"1987-10-24",11.4 +"1987-10-25",13.5 +"1987-10-26",9.8 +"1987-10-27",13.1 +"1987-10-28",9.7 +"1987-10-29",11.4 +"1987-10-30",9.9 +"1987-10-31",14.4 +"1987-11-01",19.0 +"1987-11-02",23.0 +"1987-11-03",15.4 +"1987-11-04",9.6 +"1987-11-05",10.8 +"1987-11-06",12.1 +"1987-11-07",11.0 +"1987-11-08",12.6 +"1987-11-09",14.7 +"1987-11-10",11.1 +"1987-11-11",10.1 +"1987-11-12",11.4 +"1987-11-13",13.0 +"1987-11-14",11.9 +"1987-11-15",9.5 +"1987-11-16",13.5 +"1987-11-17",15.2 +"1987-11-18",18.4 +"1987-11-19",24.1 +"1987-11-20",14.1 +"1987-11-21",10.7 +"1987-11-22",8.7 +"1987-11-23",13.3 +"1987-11-24",11.6 +"1987-11-25",9.9 +"1987-11-26",10.8 +"1987-11-27",11.5 +"1987-11-28",10.0 +"1987-11-29",13.9 +"1987-11-30",13.6 +"1987-12-01",11.9 +"1987-12-02",11.1 +"1987-12-03",8.2 +"1987-12-04",9.4 +"1987-12-05",12.7 +"1987-12-06",11.6 +"1987-12-07",11.0 +"1987-12-08",11.3 +"1987-12-09",13.4 +"1987-12-10",14.9 +"1987-12-11",15.2 +"1987-12-12",13.9 +"1987-12-13",15.0 +"1987-12-14",16.2 +"1987-12-15",17.7 +"1987-12-16",20.5 +"1987-12-17",14.7 +"1987-12-18",12.5 +"1987-12-19",10.9 +"1987-12-20",12.8 +"1987-12-21",12.7 +"1987-12-22",11.2 +"1987-12-23",11.4 +"1987-12-24",11.2 +"1987-12-25",12.1 +"1987-12-26",12.7 +"1987-12-27",16.2 +"1987-12-28",14.2 +"1987-12-29",14.3 +"1987-12-30",13.3 +"1987-12-31",16.7 +"1988-01-01",15.3 +"1988-01-02",14.3 +"1988-01-03",13.5 +"1988-01-04",15.0 +"1988-01-05",13.6 +"1988-01-06",15.2 +"1988-01-07",17.0 +"1988-01-08",18.7 +"1988-01-09",16.5 +"1988-01-10",17.4 +"1988-01-11",18.3 +"1988-01-12",18.3 +"1988-01-13",22.4 +"1988-01-14",21.4 +"1988-01-15",20.9 +"1988-01-16",17.6 +"1988-01-17",15.5 +"1988-01-18",16.6 +"1988-01-19",16.2 +"1988-01-20",15.6 +"1988-01-21",14.5 +"1988-01-22",14.0 +"1988-01-23",15.6 +"1988-01-24",12.3 +"1988-01-25",11.6 +"1988-01-26",12.6 +"1988-01-27",14.9 +"1988-01-28",17.3 +"1988-01-29",21.4 +"1988-01-30",23.4 +"1988-01-31",14.4 +"1988-02-01",14.1 +"1988-02-02",15.0 +"1988-02-03",14.5 +"1988-02-04",15.1 +"1988-02-05",13.9 +"1988-02-06",13.4 +"1988-02-07",9.2 +"1988-02-08",12.5 +"1988-02-09",15.1 +"1988-02-10",12.1 +"1988-02-11",14.5 +"1988-02-12",16.3 +"1988-02-13",16.5 +"1988-02-14",14.9 +"1988-02-15",13.2 +"1988-02-16",11.8 +"1988-02-17",13.6 +"1988-02-18",16.2 +"1988-02-19",14.1 +"1988-02-20",13.5 +"1988-02-21",15.0 +"1988-02-22",14.8 +"1988-02-23",16.2 +"1988-02-24",16.2 +"1988-02-25",13.3 +"1988-02-26",15.3 +"1988-02-27",18.4 +"1988-02-28",16.2 +"1988-02-29",16.3 +"1988-03-01",12.4 +"1988-03-02",15.6 +"1988-03-03",14.9 +"1988-03-04",14.8 +"1988-03-05",12.7 +"1988-03-06",14.2 +"1988-03-07",16.8 +"1988-03-08",16.7 +"1988-03-09",16.2 +"1988-03-10",14.5 +"1988-03-11",10.0 +"1988-03-12",12.6 +"1988-03-13",11.9 +"1988-03-14",11.8 +"1988-03-15",13.4 +"1988-03-16",14.5 +"1988-03-17",15.7 +"1988-03-18",15.3 +"1988-03-19",13.9 +"1988-03-20",13.7 
+"1988-03-21",15.1 +"1988-03-22",15.6 +"1988-03-23",14.4 +"1988-03-24",13.9 +"1988-03-25",16.2 +"1988-03-26",16.7 +"1988-03-27",15.5 +"1988-03-28",16.4 +"1988-03-29",17.5 +"1988-03-30",18.2 +"1988-03-31",16.1 +"1988-04-01",16.5 +"1988-04-02",14.6 +"1988-04-03",16.4 +"1988-04-04",13.6 +"1988-04-05",15.9 +"1988-04-06",11.9 +"1988-04-07",14.7 +"1988-04-08",9.4 +"1988-04-09",6.6 +"1988-04-10",7.9 +"1988-04-11",11.0 +"1988-04-12",15.7 +"1988-04-13",15.2 +"1988-04-14",15.9 +"1988-04-15",10.6 +"1988-04-16",8.3 +"1988-04-17",8.6 +"1988-04-18",12.7 +"1988-04-19",10.5 +"1988-04-20",12.0 +"1988-04-21",11.1 +"1988-04-22",13.0 +"1988-04-23",12.4 +"1988-04-24",13.3 +"1988-04-25",15.9 +"1988-04-26",12.0 +"1988-04-27",13.7 +"1988-04-28",17.6 +"1988-04-29",14.3 +"1988-04-30",13.7 +"1988-05-01",15.2 +"1988-05-02",14.5 +"1988-05-03",14.9 +"1988-05-04",15.5 +"1988-05-05",16.4 +"1988-05-06",14.5 +"1988-05-07",12.6 +"1988-05-08",13.6 +"1988-05-09",11.2 +"1988-05-10",11.0 +"1988-05-11",12.0 +"1988-05-12",6.8 +"1988-05-13",10.6 +"1988-05-14",13.1 +"1988-05-15",13.5 +"1988-05-16",11.7 +"1988-05-17",13.2 +"1988-05-18",12.0 +"1988-05-19",10.4 +"1988-05-20",10.0 +"1988-05-21",8.2 +"1988-05-22",9.4 +"1988-05-23",10.3 +"1988-05-24",8.1 +"1988-05-25",8.7 +"1988-05-26",12.6 +"1988-05-27",10.9 +"1988-05-28",8.7 +"1988-05-29",9.3 +"1988-05-30",6.3 +"1988-05-31",7.8 +"1988-06-01",10.0 +"1988-06-02",11.0 +"1988-06-03",11.1 +"1988-06-04",12.6 +"1988-06-05",10.2 +"1988-06-06",11.1 +"1988-06-07",8.7 +"1988-06-08",9.5 +"1988-06-09",9.7 +"1988-06-10",8.2 +"1988-06-11",5.0 +"1988-06-12",6.5 +"1988-06-13",12.1 +"1988-06-14",8.9 +"1988-06-15",6.1 +"1988-06-16",2.8 +"1988-06-17",3.7 +"1988-06-18",6.8 +"1988-06-19",6.6 +"1988-06-20",7.0 +"1988-06-21",7.3 +"1988-06-22",7.9 +"1988-06-23",10.6 +"1988-06-24",8.1 +"1988-06-25",6.7 +"1988-06-26",8.0 +"1988-06-27",10.0 +"1988-06-28",6.7 +"1988-06-29",9.4 +"1988-06-30",9.3 +"1988-07-01",6.0 +"1988-07-02",5.8 +"1988-07-03",4.9 +"1988-07-04",5.0 +"1988-07-05",8.4 +"1988-07-06",12.3 +"1988-07-07",13.0 +"1988-07-08",11.4 +"1988-07-09",6.8 +"1988-07-10",7.6 +"1988-07-11",12.4 +"1988-07-12",7.1 +"1988-07-13",7.5 +"1988-07-14",10.0 +"1988-07-15",5.3 +"1988-07-16",6.3 +"1988-07-17",8.0 +"1988-07-18",8.3 +"1988-07-19",9.3 +"1988-07-20",9.5 +"1988-07-21",5.6 +"1988-07-22",7.0 +"1988-07-23",8.5 +"1988-07-24",8.5 +"1988-07-25",8.2 +"1988-07-26",8.5 +"1988-07-27",9.6 +"1988-07-28",9.7 +"1988-07-29",7.1 +"1988-07-30",8.4 +"1988-07-31",9.2 +"1988-08-01",9.8 +"1988-08-02",8.1 +"1988-08-03",9.4 +"1988-08-04",10.0 +"1988-08-05",5.1 +"1988-08-06",6.7 +"1988-08-07",6.9 +"1988-08-08",6.8 +"1988-08-09",8.6 +"1988-08-10",9.1 +"1988-08-11",3.9 +"1988-08-12",4.8 +"1988-08-13",8.4 +"1988-08-14",11.6 +"1988-08-15",12.1 +"1988-08-16",12.4 +"1988-08-17",10.0 +"1988-08-18",10.1 +"1988-08-19",9.7 +"1988-08-20",11.7 +"1988-08-21",7.9 +"1988-08-22",8.6 +"1988-08-23",7.7 +"1988-08-24",5.8 +"1988-08-25",8.7 +"1988-08-26",10.6 +"1988-08-27",6.7 +"1988-08-28",8.8 +"1988-08-29",9.7 +"1988-08-30",9.0 +"1988-08-31",11.8 +"1988-09-01",15.2 +"1988-09-02",10.0 +"1988-09-03",10.5 +"1988-09-04",5.5 +"1988-09-05",9.4 +"1988-09-06",8.8 +"1988-09-07",5.3 +"1988-09-08",13.0 +"1988-09-09",15.2 +"1988-09-10",13.2 +"1988-09-11",11.5 +"1988-09-12",6.8 +"1988-09-13",4.7 +"1988-09-14",5.2 +"1988-09-15",6.8 +"1988-09-16",10.7 +"1988-09-17",10.1 +"1988-09-18",10.0 +"1988-09-19",9.8 +"1988-09-20",5.5 +"1988-09-21",13.5 +"1988-09-22",16.6 +"1988-09-23",8.4 +"1988-09-24",8.2 +"1988-09-25",11.1 +"1988-09-26",10.8 +"1988-09-27",8.8 +"1988-09-28",10.8 
+"1988-09-29",8.7 +"1988-09-30",12.4 +"1988-10-01",9.0 +"1988-10-02",13.5 +"1988-10-03",14.7 +"1988-10-04",10.9 +"1988-10-05",8.5 +"1988-10-06",6.0 +"1988-10-07",12.7 +"1988-10-08",11.1 +"1988-10-09",8.7 +"1988-10-10",12.3 +"1988-10-11",13.3 +"1988-10-12",5.6 +"1988-10-13",13.7 +"1988-10-14",8.5 +"1988-10-15",11.2 +"1988-10-16",8.7 +"1988-10-17",11.7 +"1988-10-18",12.5 +"1988-10-19",8.2 +"1988-10-20",15.6 +"1988-10-21",10.3 +"1988-10-22",11.4 +"1988-10-23",9.7 +"1988-10-24",6.3 +"1988-10-25",14.3 +"1988-10-26",11.3 +"1988-10-27",7.3 +"1988-10-28",12.8 +"1988-10-29",11.9 +"1988-10-30",14.3 +"1988-10-31",11.6 +"1988-11-01",13.2 +"1988-11-02",15.5 +"1988-11-03",14.1 +"1988-11-04",9.5 +"1988-11-05",7.2 +"1988-11-06",11.8 +"1988-11-07",16.8 +"1988-11-08",12.5 +"1988-11-09",9.4 +"1988-11-10",11.9 +"1988-11-11",10.3 +"1988-11-12",16.9 +"1988-11-13",17.5 +"1988-11-14",7.5 +"1988-11-15",8.6 +"1988-11-16",11.1 +"1988-11-17",11.5 +"1988-11-18",10.7 +"1988-11-19",15.7 +"1988-11-20",12.8 +"1988-11-21",13.0 +"1988-11-22",12.9 +"1988-11-23",14.3 +"1988-11-24",13.7 +"1988-11-25",12.1 +"1988-11-26",11.9 +"1988-11-27",11.8 +"1988-11-28",11.4 +"1988-11-29",10.3 +"1988-11-30",11.7 +"1988-12-01",12.0 +"1988-12-02",17.4 +"1988-12-03",16.8 +"1988-12-04",16.2 +"1988-12-05",13.0 +"1988-12-06",12.5 +"1988-12-07",12.4 +"1988-12-08",16.1 +"1988-12-09",20.2 +"1988-12-10",14.3 +"1988-12-11",11.0 +"1988-12-12",14.4 +"1988-12-13",15.7 +"1988-12-14",19.7 +"1988-12-15",20.7 +"1988-12-16",23.9 +"1988-12-17",16.6 +"1988-12-18",17.5 +"1988-12-19",14.9 +"1988-12-20",13.6 +"1988-12-21",11.9 +"1988-12-22",15.2 +"1988-12-23",17.3 +"1988-12-24",19.8 +"1988-12-25",15.8 +"1988-12-26",9.5 +"1988-12-27",12.9 +"1988-12-28",12.9 +"1988-12-29",14.8 +"1988-12-30",14.1 +"1989-01-01",14.3 +"1989-01-02",17.4 +"1989-01-03",18.5 +"1989-01-04",16.8 +"1989-01-05",11.5 +"1989-01-06",9.5 +"1989-01-07",12.2 +"1989-01-08",15.7 +"1989-01-09",16.3 +"1989-01-10",13.6 +"1989-01-11",12.6 +"1989-01-12",13.8 +"1989-01-13",12.1 +"1989-01-14",13.4 +"1989-01-15",17.3 +"1989-01-16",19.4 +"1989-01-17",16.6 +"1989-01-18",13.9 +"1989-01-19",13.1 +"1989-01-20",16.0 +"1989-01-21",14.5 +"1989-01-22",15.0 +"1989-01-23",12.6 +"1989-01-24",12.5 +"1989-01-25",15.2 +"1989-01-26",16.2 +"1989-01-27",16.5 +"1989-01-28",20.1 +"1989-01-29",20.6 +"1989-01-30",16.9 +"1989-01-31",16.5 +"1989-02-01",16.1 +"1989-02-02",14.4 +"1989-02-03",16.3 +"1989-02-04",15.7 +"1989-02-05",14.2 +"1989-02-06",13.2 +"1989-02-07",16.8 +"1989-02-08",18.5 +"1989-02-09",16.7 +"1989-02-10",15.3 +"1989-02-11",15.9 +"1989-02-12",15.2 +"1989-02-13",17.5 +"1989-02-14",18.3 +"1989-02-15",19.4 +"1989-02-16",19.4 +"1989-02-17",19.5 +"1989-02-18",20.5 +"1989-02-19",15.7 +"1989-02-20",15.0 +"1989-02-21",16.1 +"1989-02-22",14.3 +"1989-02-23",13.0 +"1989-02-24",16.2 +"1989-02-25",17.7 +"1989-02-26",13.2 +"1989-02-27",15.8 +"1989-02-28",18.5 +"1989-03-01",20.4 +"1989-03-02",22.0 +"1989-03-03",19.7 +"1989-03-04",19.6 +"1989-03-05",20.3 +"1989-03-06",18.3 +"1989-03-07",18.9 +"1989-03-08",20.3 +"1989-03-09",21.4 +"1989-03-10",18.3 +"1989-03-11",17.8 +"1989-03-12",17.7 +"1989-03-13",12.8 +"1989-03-14",15.1 +"1989-03-15",15.0 +"1989-03-16",14.8 +"1989-03-17",12.0 +"1989-03-18",12.5 +"1989-03-19",15.0 +"1989-03-20",17.1 +"1989-03-21",17.3 +"1989-03-22",16.9 +"1989-03-23",16.5 +"1989-03-24",13.6 +"1989-03-25",13.2 +"1989-03-26",9.4 +"1989-03-27",9.5 +"1989-03-28",11.8 +"1989-03-29",10.4 +"1989-03-30",9.7 +"1989-03-31",12.6 +"1989-04-01",13.3 +"1989-04-02",15.1 +"1989-04-03",14.2 +"1989-04-04",14.2 +"1989-04-05",19.2 
+"1989-04-06",12.6 +"1989-04-07",14.2 +"1989-04-08",11.9 +"1989-04-09",13.9 +"1989-04-10",13.5 +"1989-04-11",15.3 +"1989-04-12",13.9 +"1989-04-13",14.0 +"1989-04-14",12.9 +"1989-04-15",8.5 +"1989-04-16",11.4 +"1989-04-17",10.9 +"1989-04-18",12.0 +"1989-04-19",8.6 +"1989-04-20",9.0 +"1989-04-21",9.6 +"1989-04-22",10.2 +"1989-04-23",9.8 +"1989-04-24",8.3 +"1989-04-25",11.0 +"1989-04-26",11.9 +"1989-04-27",14.0 +"1989-04-28",15.8 +"1989-04-29",14.5 +"1989-04-30",13.2 +"1989-05-01",14.2 +"1989-05-02",14.6 +"1989-05-03",11.8 +"1989-05-04",14.4 +"1989-05-05",10.4 +"1989-05-06",10.3 +"1989-05-07",10.8 +"1989-05-08",10.5 +"1989-05-09",9.5 +"1989-05-10",12.5 +"1989-05-11",13.7 +"1989-05-12",12.7 +"1989-05-13",11.9 +"1989-05-14",11.4 +"1989-05-15",9.7 +"1989-05-16",8.3 +"1989-05-17",8.1 +"1989-05-18",11.7 +"1989-05-19",11.6 +"1989-05-20",7.4 +"1989-05-21",5.2 +"1989-05-22",11.0 +"1989-05-23",9.5 +"1989-05-24",9.2 +"1989-05-25",10.7 +"1989-05-26",9.0 +"1989-05-27",10.2 +"1989-05-28",10.3 +"1989-05-29",12.1 +"1989-05-30",13.2 +"1989-05-31",6.6 +"1989-06-01",2.3 +"1989-06-02",1.4 +"1989-06-03",2.1 +"1989-06-04",6.6 +"1989-06-05",8.9 +"1989-06-06",7.8 +"1989-06-07",9.0 +"1989-06-08",10.3 +"1989-06-09",7.9 +"1989-06-10",7.2 +"1989-06-11",8.6 +"1989-06-12",8.8 +"1989-06-13",6.2 +"1989-06-14",9.5 +"1989-06-15",10.2 +"1989-06-16",9.7 +"1989-06-17",11.2 +"1989-06-18",10.2 +"1989-06-19",10.1 +"1989-06-20",8.1 +"1989-06-21",6.6 +"1989-06-22",5.0 +"1989-06-23",4.7 +"1989-06-24",5.3 +"1989-06-25",4.5 +"1989-06-26",2.3 +"1989-06-27",1.4 +"1989-06-28",0.5 +"1989-06-29",2.4 +"1989-06-30",8.0 +"1989-07-01",6.0 +"1989-07-02",7.1 +"1989-07-03",9.7 +"1989-07-04",6.9 +"1989-07-05",5.3 +"1989-07-06",7.0 +"1989-07-07",6.2 +"1989-07-08",7.0 +"1989-07-09",9.7 +"1989-07-10",8.0 +"1989-07-11",8.5 +"1989-07-12",7.1 +"1989-07-13",7.5 +"1989-07-14",3.3 +"1989-07-15",1.8 +"1989-07-16",2.6 +"1989-07-17",5.3 +"1989-07-18",5.8 +"1989-07-19",5.8 +"1989-07-20",7.2 +"1989-07-21",5.3 +"1989-07-22",1.6 +"1989-07-23",3.1 +"1989-07-24",5.3 +"1989-07-25",7.7 +"1989-07-26",4.2 +"1989-07-27",5.5 +"1989-07-28",9.0 +"1989-07-29",11.2 +"1989-07-30",8.0 +"1989-07-31",7.6 +"1989-08-01",3.7 +"1989-08-02",7.5 +"1989-08-03",8.1 +"1989-08-04",8.4 +"1989-08-05",7.1 +"1989-08-06",7.6 +"1989-08-07",7.6 +"1989-08-08",5.6 +"1989-08-09",7.0 +"1989-08-10",10.5 +"1989-08-11",7.3 +"1989-08-12",7.8 +"1989-08-13",5.8 +"1989-08-14",3.8 +"1989-08-15",5.8 +"1989-08-16",6.7 +"1989-08-17",6.6 +"1989-08-18",6.6 +"1989-08-19",9.0 +"1989-08-20",8.1 +"1989-08-21",5.1 +"1989-08-22",8.6 +"1989-08-23",7.0 +"1989-08-24",5.5 +"1989-08-25",7.4 +"1989-08-26",6.2 +"1989-08-27",4.2 +"1989-08-28",6.3 +"1989-08-29",7.0 +"1989-08-30",4.0 +"1989-08-31",8.0 +"1989-09-01",8.8 +"1989-09-02",8.8 +"1989-09-03",6.1 +"1989-09-04",8.6 +"1989-09-05",8.9 +"1989-09-06",7.8 +"1989-09-07",5.0 +"1989-09-08",7.0 +"1989-09-09",13.3 +"1989-09-10",7.9 +"1989-09-11",7.5 +"1989-09-12",8.3 +"1989-09-13",7.2 +"1989-09-14",6.5 +"1989-09-15",8.9 +"1989-09-16",7.4 +"1989-09-17",9.9 +"1989-09-18",9.3 +"1989-09-19",10.6 +"1989-09-20",8.6 +"1989-09-21",7.2 +"1989-09-22",12.6 +"1989-09-23",7.8 +"1989-09-24",6.3 +"1989-09-25",9.2 +"1989-09-26",5.8 +"1989-09-27",9.0 +"1989-09-28",5.0 +"1989-09-29",11.9 +"1989-09-30",13.4 +"1989-10-01",10.5 +"1989-10-02",6.2 +"1989-10-03",5.1 +"1989-10-04",9.5 +"1989-10-05",11.7 +"1989-10-06",9.2 +"1989-10-07",7.3 +"1989-10-08",9.7 +"1989-10-09",9.4 +"1989-10-10",10.0 +"1989-10-11",10.9 +"1989-10-12",11.0 +"1989-10-13",10.9 +"1989-10-14",8.0 +"1989-10-15",11.2 +"1989-10-16",7.5 
+"1989-10-17",7.2 +"1989-10-18",13.2 +"1989-10-19",12.9 +"1989-10-20",9.4 +"1989-10-21",10.2 +"1989-10-22",9.5 +"1989-10-23",12.4 +"1989-10-24",10.2 +"1989-10-25",13.4 +"1989-10-26",11.6 +"1989-10-27",8.0 +"1989-10-28",9.0 +"1989-10-29",9.3 +"1989-10-30",13.5 +"1989-10-31",8.0 +"1989-11-01",8.1 +"1989-11-02",10.0 +"1989-11-03",8.5 +"1989-11-04",12.5 +"1989-11-05",15.0 +"1989-11-06",13.3 +"1989-11-07",11.0 +"1989-11-08",11.9 +"1989-11-09",8.3 +"1989-11-10",9.7 +"1989-11-11",11.3 +"1989-11-12",12.5 +"1989-11-13",9.4 +"1989-11-14",11.4 +"1989-11-15",13.2 +"1989-11-16",13.8 +"1989-11-17",16.0 +"1989-11-18",10.9 +"1989-11-19",11.9 +"1989-11-20",12.4 +"1989-11-21",13.2 +"1989-11-22",15.5 +"1989-11-23",21.6 +"1989-11-24",14.9 +"1989-11-25",14.4 +"1989-11-26",12.9 +"1989-11-27",13.1 +"1989-11-28",14.0 +"1989-11-29",17.9 +"1989-11-30",17.7 +"1989-12-01",16.3 +"1989-12-02",18.3 +"1989-12-03",13.7 +"1989-12-04",13.3 +"1989-12-05",10.6 +"1989-12-06",14.1 +"1989-12-07",16.0 +"1989-12-08",16.5 +"1989-12-09",14.1 +"1989-12-10",18.7 +"1989-12-11",16.2 +"1989-12-12",14.8 +"1989-12-13",12.6 +"1989-12-14",10.4 +"1989-12-15",12.2 +"1989-12-16",12.6 +"1989-12-17",12.1 +"1989-12-18",17.3 +"1989-12-19",16.4 +"1989-12-20",12.6 +"1989-12-21",12.3 +"1989-12-22",11.8 +"1989-12-23",12.0 +"1989-12-24",12.7 +"1989-12-25",16.4 +"1989-12-26",16.0 +"1989-12-27",13.3 +"1989-12-28",11.7 +"1989-12-29",10.4 +"1989-12-30",14.4 +"1989-12-31",12.7 +"1990-01-01",14.8 +"1990-01-02",13.3 +"1990-01-03",15.6 +"1990-01-04",14.5 +"1990-01-05",14.3 +"1990-01-06",15.3 +"1990-01-07",16.4 +"1990-01-08",14.8 +"1990-01-09",17.4 +"1990-01-10",18.8 +"1990-01-11",22.1 +"1990-01-12",19.0 +"1990-01-13",15.5 +"1990-01-14",15.8 +"1990-01-15",14.7 +"1990-01-16",10.7 +"1990-01-17",11.5 +"1990-01-18",15.0 +"1990-01-19",14.5 +"1990-01-20",14.5 +"1990-01-21",13.3 +"1990-01-22",14.3 +"1990-01-23",14.3 +"1990-01-24",20.5 +"1990-01-25",15.0 +"1990-01-26",17.1 +"1990-01-27",16.9 +"1990-01-28",16.9 +"1990-01-29",13.6 +"1990-01-30",16.4 +"1990-01-31",16.1 +"1990-02-01",12.0 +"1990-02-02",12.2 +"1990-02-03",14.8 +"1990-02-04",14.8 +"1990-02-05",14.4 +"1990-02-06",12.9 +"1990-02-07",13.4 +"1990-02-08",15.9 +"1990-02-09",16.1 +"1990-02-10",17.6 +"1990-02-11",15.6 +"1990-02-12",15.0 +"1990-02-13",13.0 +"1990-02-14",14.1 +"1990-02-15",17.3 +"1990-02-16",15.7 +"1990-02-17",18.6 +"1990-02-18",12.7 +"1990-02-19",14.0 +"1990-02-20",13.7 +"1990-02-21",16.3 +"1990-02-22",20.0 +"1990-02-23",17.0 +"1990-02-24",15.2 +"1990-02-25",16.5 +"1990-02-26",16.5 +"1990-02-27",17.3 +"1990-02-28",19.1 +"1990-03-01",19.3 +"1990-03-02",17.3 +"1990-03-03",19.0 +"1990-03-04",19.8 +"1990-03-05",19.3 +"1990-03-06",17.2 +"1990-03-07",14.2 +"1990-03-08",10.3 +"1990-03-09",13.0 +"1990-03-10",15.3 +"1990-03-11",15.0 +"1990-03-12",12.1 +"1990-03-13",9.2 +"1990-03-14",11.0 +"1990-03-15",15.0 +"1990-03-16",11.6 +"1990-03-17",11.6 +"1990-03-18",15.1 +"1990-03-19",15.0 +"1990-03-20",13.6 +"1990-03-21",12.5 +"1990-03-22",14.3 +"1990-03-23",16.0 +"1990-03-24",17.4 +"1990-03-25",16.9 +"1990-03-26",18.0 +"1990-03-27",20.6 +"1990-03-28",14.2 +"1990-03-29",10.9 +"1990-03-30",11.9 +"1990-03-31",13.3 +"1990-04-01",15.3 +"1990-04-02",14.7 +"1990-04-03",11.0 +"1990-04-04",12.2 +"1990-04-05",14.2 +"1990-04-06",17.0 +"1990-04-07",15.8 +"1990-04-08",15.2 +"1990-04-09",15.1 +"1990-04-10",14.7 +"1990-04-11",18.5 +"1990-04-12",16.4 +"1990-04-13",18.4 +"1990-04-14",15.1 +"1990-04-15",9.9 +"1990-04-16",10.2 +"1990-04-17",12.6 +"1990-04-18",13.2 +"1990-04-19",11.5 +"1990-04-20",13.8 +"1990-04-21",14.5 
+"1990-04-22",14.7 +"1990-04-23",11.2 +"1990-04-24",12.7 +"1990-04-25",13.7 +"1990-04-26",11.5 +"1990-04-27",10.4 +"1990-04-28",8.9 +"1990-04-29",11.1 +"1990-04-30",9.5 +"1990-05-01",13.0 +"1990-05-02",13.9 +"1990-05-03",12.6 +"1990-05-04",14.3 +"1990-05-05",16.0 +"1990-05-06",13.3 +"1990-05-07",7.0 +"1990-05-08",4.9 +"1990-05-09",6.9 +"1990-05-10",13.7 +"1990-05-11",10.6 +"1990-05-12",12.3 +"1990-05-13",11.1 +"1990-05-14",10.2 +"1990-05-15",9.5 +"1990-05-16",8.9 +"1990-05-17",13.4 +"1990-05-18",9.1 +"1990-05-19",9.4 +"1990-05-20",8.7 +"1990-05-21",5.8 +"1990-05-22",4.5 +"1990-05-23",7.2 +"1990-05-24",10.0 +"1990-05-25",10.5 +"1990-05-26",10.7 +"1990-05-27",8.2 +"1990-05-28",6.1 +"1990-05-29",4.5 +"1990-05-30",6.1 +"1990-05-31",9.8 +"1990-06-01",9.7 +"1990-06-02",8.2 +"1990-06-03",8.4 +"1990-06-04",8.5 +"1990-06-05",10.4 +"1990-06-06",6.8 +"1990-06-07",6.0 +"1990-06-08",6.6 +"1990-06-09",7.8 +"1990-06-10",10.3 +"1990-06-11",7.2 +"1990-06-12",7.4 +"1990-06-13",11.4 +"1990-06-14",5.4 +"1990-06-15",4.4 +"1990-06-16",6.4 +"1990-06-17",9.3 +"1990-06-18",7.7 +"1990-06-19",8.1 +"1990-06-20",8.3 +"1990-06-21",9.1 +"1990-06-22",7.7 +"1990-06-23",10.6 +"1990-06-24",8.2 +"1990-06-25",7.9 +"1990-06-26",5.2 +"1990-06-27",5.9 +"1990-06-28",3.7 +"1990-06-29",5.6 +"1990-06-30",9.4 +"1990-07-01",7.4 +"1990-07-02",7.3 +"1990-07-03",7.7 +"1990-07-04",7.7 +"1990-07-05",9.3 +"1990-07-06",4.4 +"1990-07-07",5.7 +"1990-07-08",10.2 +"1990-07-09",10.2 +"1990-07-10",9.3 +"1990-07-11",5.4 +"1990-07-12",5.0 +"1990-07-13",7.6 +"1990-07-14",9.6 +"1990-07-15",10.4 +"1990-07-16",11.2 +"1990-07-17",9.1 +"1990-07-18",11.2 +"1990-07-19",6.8 +"1990-07-20",8.3 +"1990-07-21",9.7 +"1990-07-22",9.6 +"1990-07-23",9.8 +"1990-07-24",10.8 +"1990-07-25",9.2 +"1990-07-26",6.5 +"1990-07-27",8.1 +"1990-07-28",7.3 +"1990-07-29",7.9 +"1990-07-30",6.0 +"1990-07-31",5.0 +"1990-08-01",6.8 +"1990-08-02",9.8 +"1990-08-03",5.7 +"1990-08-04",8.6 +"1990-08-05",10.6 +"1990-08-06",7.8 +"1990-08-07",7.7 +"1990-08-08",8.6 +"1990-08-09",6.5 +"1990-08-10",6.9 +"1990-08-11",6.4 +"1990-08-12",8.5 +"1990-08-13",7.8 +"1990-08-14",9.3 +"1990-08-15",8.4 +"1990-08-16",7.8 +"1990-08-17",7.4 +"1990-08-18",7.7 +"1990-08-19",8.9 +"1990-08-20",9.7 +"1990-08-21",9.9 +"1990-08-22",6.1 +"1990-08-23",6.6 +"1990-08-24",7.6 +"1990-08-25",7.4 +"1990-08-26",8.0 +"1990-08-27",2.1 +"1990-08-28",5.9 +"1990-08-29",11.6 +"1990-08-30",8.6 +"1990-08-31",7.9 +"1990-09-01",6.0 +"1990-09-02",9.5 +"1990-09-03",8.6 +"1990-09-04",7.6 +"1990-09-05",10.4 +"1990-09-06",10.3 +"1990-09-07",7.5 +"1990-09-08",3.0 +"1990-09-09",5.3 +"1990-09-10",10.5 +"1990-09-11",14.6 +"1990-09-12",12.6 +"1990-09-13",9.8 +"1990-09-14",7.2 +"1990-09-15",10.1 +"1990-09-16",10.4 +"1990-09-17",3.7 +"1990-09-18",7.3 +"1990-09-19",11.6 +"1990-09-20",16.3 +"1990-09-21",9.6 +"1990-09-22",6.8 +"1990-09-23",5.2 +"1990-09-24",10.6 +"1990-09-25",16.3 +"1990-09-26",9.8 +"1990-09-27",4.6 +"1990-09-28",11.1 +"1990-09-29",8.7 +"1990-09-30",10.0 +"1990-10-01",11.3 +"1990-10-02",10.5 +"1990-10-03",9.9 +"1990-10-04",11.0 +"1990-10-05",14.0 +"1990-10-06",9.2 +"1990-10-07",9.8 +"1990-10-08",6.0 +"1990-10-09",9.8 +"1990-10-10",9.2 +"1990-10-11",11.8 +"1990-10-12",10.3 +"1990-10-13",7.5 +"1990-10-14",7.7 +"1990-10-15",15.8 +"1990-10-16",14.6 +"1990-10-17",10.5 +"1990-10-18",11.3 +"1990-10-19",10.9 +"1990-10-20",6.4 +"1990-10-21",10.9 +"1990-10-22",9.0 +"1990-10-23",10.9 +"1990-10-24",12.4 +"1990-10-25",11.6 +"1990-10-26",13.3 +"1990-10-27",14.4 +"1990-10-28",18.4 +"1990-10-29",13.6 +"1990-10-30",14.9 +"1990-10-31",14.8 
+"1990-11-01",15.4 +"1990-11-02",11.8 +"1990-11-03",13.0 +"1990-11-04",11.1 +"1990-11-05",12.5 +"1990-11-06",18.3 +"1990-11-07",19.2 +"1990-11-08",15.4 +"1990-11-09",13.1 +"1990-11-10",11.5 +"1990-11-11",8.6 +"1990-11-12",12.6 +"1990-11-13",13.8 +"1990-11-14",14.6 +"1990-11-15",13.2 +"1990-11-16",12.3 +"1990-11-17",8.8 +"1990-11-18",10.7 +"1990-11-19",9.9 +"1990-11-20",8.3 +"1990-11-21",15.0 +"1990-11-22",12.2 +"1990-11-23",10.5 +"1990-11-24",11.1 +"1990-11-25",13.0 +"1990-11-26",12.9 +"1990-11-27",8.8 +"1990-11-28",14.7 +"1990-11-29",14.7 +"1990-11-30",12.7 +"1990-12-01",13.3 +"1990-12-02",13.2 +"1990-12-03",16.2 +"1990-12-04",17.3 +"1990-12-05",20.5 +"1990-12-06",20.2 +"1990-12-07",19.4 +"1990-12-08",15.5 +"1990-12-09",14.1 +"1990-12-10",11.0 +"1990-12-11",11.1 +"1990-12-12",14.0 +"1990-12-13",11.4 +"1990-12-14",12.5 +"1990-12-15",13.4 +"1990-12-16",13.6 +"1990-12-17",13.9 +"1990-12-18",17.2 +"1990-12-19",14.7 +"1990-12-20",15.4 +"1990-12-21",13.1 +"1990-12-22",13.2 +"1990-12-23",13.9 +"1990-12-24",10.0 +"1990-12-25",12.9 +"1990-12-26",14.6 +"1990-12-27",14.0 +"1990-12-28",13.6 +"1990-12-29",13.5 +"1990-12-30",15.7 +"1990-12-31",13.0 + +Daily minimum temperatures in Melbourne, Australia, 1981-1990 + From 15c303fec16fe21ceeb09bd42f3e45e8590ff2b8 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 8 Apr 2020 16:49:21 -0400 Subject: [PATCH 177/329] update --- rl/epsilon_greedy.py | 88 ++++++++++++++++++++++++++++++++++++ rl/epsilon_greedy_starter.py | 88 ++++++++++++++++++++++++++++++++++++ 2 files changed, 176 insertions(+) create mode 100644 rl/epsilon_greedy.py create mode 100644 rl/epsilon_greedy_starter.py diff --git a/rl/epsilon_greedy.py b/rl/epsilon_greedy.py new file mode 100644 index 00000000..b906de88 --- /dev/null +++ b/rl/epsilon_greedy.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = np.random.randint(len(bandits)) + else: + num_times_exploited += 1 + j = np.argmax([b.p_estimate for b in bandits]) + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/rl/epsilon_greedy_starter.py b/rl/epsilon_greedy_starter.py new file mode 100644 index 00000000..4bb9d278 --- /dev/null +++ b/rl/epsilon_greedy_starter.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N = # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = # TODO + else: + num_times_exploited += 1 + j = # TODO + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", 
num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() From 02321889cc6be400afef9caf88d39d2784fec2a3 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 13 Apr 2020 12:58:30 -0400 Subject: [PATCH 178/329] update extra reading --- rl/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index 6db18878..64dd9812 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -1,3 +1,6 @@ +Finite-time Analysis of the Multiarmed Bandit Problem +https://homes.di.unimi.it/cesa-bianchi/Pubblicazioni/ml-02.pdf + Hacking Google reCAPTCHA v3 using Reinforcement Learning https://arxiv.org/pdf/1903.01003.pdf From aef261b10c09982de2a2ee2a6bbaffcc623ed5c8 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 13 Apr 2020 17:35:12 -0400 Subject: [PATCH 179/329] update --- rl/optimistic.py | 71 ++++++++++++++++++++++++++++++++++++++++ rl/optimistic_starter.py | 71 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 rl/optimistic.py create mode 100644 rl/optimistic_starter.py diff --git a/rl/optimistic.py b/rl/optimistic.py new file mode 100644 index 00000000..1d024fef --- /dev/null +++ b/rl/optimistic.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 5. + self.N = 1. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
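+    # same running-mean update as in epsilon_greedy.py; since p_estimate
+    # starts at the optimistic value 5.0 (with N = 1), each update drags the
+    # estimate down toward the true win rate, while rarely-pulled bandits
+    # keep an inflated estimate -- this is what makes the greedy argmax
+    # over p_estimate keep exploring them.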
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = np.argmax([b.p_estimate for b in bandits]) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/rl/optimistic_starter.py b/rl/optimistic_starter.py new file mode 100644 index 00000000..56b4e5c9 --- /dev/null +++ b/rl/optimistic_starter.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = # TODO + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() From 5eb31ca773aaf0e4b36bb764b3109c9d6ffb64a0 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 14 Apr 2020 13:48:37 -0400 Subject: [PATCH 180/329] update --- nlp_class3/attention.py | 2 +- nlp_class3/wseq2seq.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index 550b5bcb..6a8cd19f 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -65,7 +65,7 @@ def 
softmax_over_time(x): continue # split up the input and translation - input_text, translation = line.rstrip().split('\t') + input_text, translation, *rest = line.rstrip().split('\t') # make the target input and output # recall we'll be using teacher forcing diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index 52c0a65e..cec612cd 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -49,7 +49,7 @@ continue # split up the input and translation - input_text, translation = line.rstrip().split('\t') + input_text, translation, *rest = line.rstrip().split('\t') # make the target input and output # recall we'll be using teacher forcing From 44f8b5c5124fc5c097701e5b4bad7bb5725bb392 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 15 Apr 2020 21:37:51 -0400 Subject: [PATCH 181/329] update --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 468fb1ba..680c3fa7 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,12 @@ Find associated courses at https://deeplearningcourses.com Please note that not all code from all courses will be found in this repository. Some newer code examples (e.g. everything from Tensorflow 2.0) were done in Google Colab. Therefore, you should check the instructions given in the lectures for the course you are taking. +Why you should not fork this repo +================================= + +I've noticed that many people have out-of-date forks. Thus, I recommend not forking this repository if you take one of my courses. I am constantly updating my courses, and your fork will soon become out-of-date. You should clone the repository instead to make it easy to get updates (i.e. just "git pull" randomly and frequently). + + Direct Course Links =================== From 24a40c54dd777dcafcb28ff1ce2d528d7030b27c Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 15 Apr 2020 21:39:23 -0400 Subject: [PATCH 182/329] update --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 680c3fa7..e5308696 100644 --- a/README.md +++ b/README.md @@ -19,9 +19,16 @@ I've noticed that many people have out-of-date forks. 
Thus, I recommend not fork Direct Course Links =================== +PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Apr 2020) +https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP + + Tensorflow 2.0: Deep Learning and Artificial Intelligence -(Main Course - special discount link) https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 -(VIP Content) https://deeplearningcourses.com/c/deep-learning-tensorflow-2 +(Main Course - special discount link) +https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 + +Tensorflow 2.0: Deep Learning and Artificial Intelligence (VIP Content) +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 Cutting-Edge AI: Deep Reinforcement Learning in Python https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence From d346663f24982615de7ec0425c9e70c954b808fd Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 18 Apr 2020 17:32:52 -0400 Subject: [PATCH 183/329] update --- rl/ucb1.py | 82 +++++++++++++++++++++++----------------------- rl/ucb1_starter.py | 81 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+), 41 deletions(-) create mode 100644 rl/ucb1_starter.py diff --git a/rl/ucb1.py b/rl/ucb1.py index 48480fe1..5779b654 100644 --- a/rl/ucb1.py +++ b/rl/ucb1.py @@ -8,74 +8,74 @@ import numpy as np import matplotlib.pyplot as plt -from comparing_epsilons import run_experiment as run_experiment_eps + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] class Bandit: - def __init__(self, m): - self.m = m - self.mean = 0 - self.N = 0 + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far def pull(self): - return np.random.randn() + self.m + # draw a 1 with probability p + return np.random.random() < self.p def update(self, x): - self.N += 1 - self.mean = (1 - 1.0/self.N)*self.mean + 1.0/self.N*x + self.N += 1. 
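+    # the estimate itself is just the sample mean, exactly as in the epsilon-greedy
+    # bandit; UCB1's exploration comes from the sqrt(2*ln(N) / N_j) bonus added in
+    # ucb() below, not from this update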
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N def ucb(mean, n, nj): - if nj == 0: - return float('inf') return mean + np.sqrt(2*np.log(n) / nj) -def run_experiment(m1, m2, m3, N): - bandits = [Bandit(m1), Bandit(m2), Bandit(m3)] +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 - data = np.empty(N) + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) - for i in range(N): - j = np.argmax([ucb(b.mean, i+1, b.N) for b in bandits]) + for i in range(NUM_TRIALS): + j = np.argmax([ucb(b.p_estimate, total_plays, b.N) for b in bandits]) x = bandits[j].pull() + total_plays += 1 bandits[j].update(x) # for the plot - data[i] = x - cumulative_average = np.cumsum(data) / (np.arange(N) + 1) - - # for b in bandits: - # print("bandit nj:", b.N) + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) # plot moving average ctr plt.plot(cumulative_average) - plt.plot(np.ones(N)*m1) - plt.plot(np.ones(N)*m2) - plt.plot(np.ones(N)*m3) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) plt.xscale('log') plt.show() - # for b in bandits: - # print(b.mean) - - return cumulative_average + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() -if __name__ == '__main__': - eps = run_experiment_eps(1.0, 2.0, 3.0, 0.1, 100000) - ucb = run_experiment(1.0, 2.0, 3.0, 100000) + for b in bandits: + print(b.p_estimate) - # log scale plot - plt.plot(eps, label='eps = 0.1') - plt.plot(ucb, label='ucb1') - plt.legend() - plt.xscale('log') - plt.show() + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + return cumulative_average - # linear plot - plt.plot(eps, label='eps = 0.1') - plt.plot(ucb, label='ucb1') - plt.legend() - plt.show() +if __name__ == '__main__': + run_experiment() diff --git a/rl/ucb1_starter.py b/rl/ucb1_starter.py new file mode 100644 index 00000000..9e9c3106 --- /dev/null +++ b/rl/ucb1_starter.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +# https://books.google.ca/books?id=_ATpBwAAQBAJ&lpg=PA201&ots=rinZM8jQ6s&dq=hoeffding%20bound%20gives%20probability%20%22greater%20than%201%22&pg=PA201#v=onepage&q&f=false +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def ucb(mean, n, nj): + return # TODO + + +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 + + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + for i in range(NUM_TRIALS): + j = # TODO + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + # for the plot + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.xscale('log') + plt.show() + + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + + for b in bandits: + print(b.p_estimate) + + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + From 300f62f840e74a3d4b725b29913c7eb3e6ab8ae2 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 18 Apr 2020 17:36:48 -0400 Subject: [PATCH 184/329] update --- rl/bayesian_bandit.py | 78 ++++++++++++++++++++++++++++++++++++++++++ rl/bayesian_starter.py | 78 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) create mode 100644 rl/bayesian_bandit.py create mode 100644 rl/bayesian_starter.py diff --git a/rl/bayesian_bandit.py b/rl/bayesian_bandit.py new file mode 100644 index 00000000..61e8f812 --- /dev/null +++ b/rl/bayesian_bandit.py @@ -0,0 +1,78 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import beta + + +# np.random.seed(2) +NUM_TRIALS = 2000 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + self.p = p + self.a = 1 + self.b = 1 + self.N = 0 # for information only + + def pull(self): + return np.random.random() < self.p + + def sample(self): + return np.random.beta(self.a, self.b) + + def update(self, x): + self.a += x + self.b += 1 - x + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(0, 1, 200) + for b in bandits: + y = beta.pdf(x, b.a, b.b) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win 
rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + +if __name__ == "__main__": + experiment() diff --git a/rl/bayesian_starter.py b/rl/bayesian_starter.py new file mode 100644 index 00000000..68e12f75 --- /dev/null +++ b/rl/bayesian_starter.py @@ -0,0 +1,78 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import beta + + +# np.random.seed(2) +NUM_TRIALS = 2000 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + self.p = p + self.a = # TODO + self.b = # TODO + self.N = 0 # for information only + + def pull(self): + return np.random.random() < self.p + + def sample(self): + return # TODO - draw a sample from Beta(a, b) + + def update(self, x): + self.a = # TODO + self.b = # TODO + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(0, 1, 200) + for b in bandits: + y = beta.pdf(x, b.a, b.b) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = # TODO + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + +if __name__ == "__main__": + experiment() From 4e21e8fc5fbbb35609c03ed02ad171f4a759e7b0 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 18 Apr 2020 17:51:05 -0400 Subject: [PATCH 185/329] update --- rl/bayesian_normal.py | 86 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 rl/bayesian_normal.py diff --git a/rl/bayesian_normal.py b/rl/bayesian_normal.py new file mode 100644 index 00000000..4305e0f7 --- /dev/null +++ b/rl/bayesian_normal.py @@ -0,0 +1,86 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm + + +np.random.seed(1) +NUM_TRIALS = 2000 +BANDIT_MEANS = [1, 2, 3] + + +class Bandit: + def __init__(self, true_mean): + self.true_mean = true_mean + # parameters for mu - prior is N(0,1) + self.m = 0 + self.lambda_ = 1 + self.sum_x = 0 # for convenience + self.tau = 1 + self.N = 0 + + def pull(self): + return np.random.randn() / np.sqrt(self.tau) + self.true_mean + + def sample(self): + return np.random.randn() / 
np.sqrt(self.lambda_) + self.m + + def update(self, x): + self.lambda_ += self.tau + self.sum_x += x + self.m = self.tau*self.sum_x / self.lambda_ + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(-3, 6, 200) + for b in bandits: + y = norm.pdf(x, b.m, np.sqrt(1. / b.lambda_)) + plt.plot(x, y, label=f"real mean: {b.true_mean:.4f}, num plays: {b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def run_experiment(): + bandits = [Bandit(m) for m in BANDIT_MEANS] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.empty(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # update rewards + rewards[i] = x + + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + for m in BANDIT_MEANS: + plt.plot(np.ones(NUM_TRIALS)*m) + plt.show() + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + + From 190288788450ebc9d2215fdecbde672f726c3799 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 6 May 2020 12:57:52 -0400 Subject: [PATCH 186/329] update --- rl/q_learning.py | 6 +----- rl/sarsa.py | 4 +--- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/rl/q_learning.py b/rl/q_learning.py index ace032d6..f9109650 100644 --- a/rl/q_learning.py +++ b/rl/q_learning.py @@ -87,17 +87,13 @@ r = grid.move(a) s2 = grid.current_state() - # adaptive learning rate - alpha = ALPHA / update_counts_sa[s][a] - update_counts_sa[s][a] += 0.005 - # we will update Q(s,a) AS we experience the episode old_qsa = Q[s][a] # the difference between SARSA and Q-Learning is with Q-Learning # we will use this max[a']{ Q(s',a')} in our update # even if we do not end up taking this action in the next step a2, max_q_s2a2 = max_dict(Q[s2]) - Q[s][a] = Q[s][a] + alpha*(r + GAMMA*max_q_s2a2 - Q[s][a]) + Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*max_q_s2a2 - Q[s][a]) biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a])) # we would like to know how often Q(s) has been updated too diff --git a/rl/sarsa.py b/rl/sarsa.py index 0f16e179..25c1a94d 100644 --- a/rl/sarsa.py +++ b/rl/sarsa.py @@ -91,10 +91,8 @@ a2 = random_action(a2, eps=0.5/t) # epsilon-greedy # we will update Q(s,a) AS we experience the episode - alpha = ALPHA / update_counts_sa[s][a] - update_counts_sa[s][a] += 0.005 old_qsa = Q[s][a] - Q[s][a] = Q[s][a] + alpha*(r + GAMMA*Q[s2][a2] - Q[s][a]) + Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*Q[s2][a2] - Q[s][a]) biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a])) # we would like to know how often Q(s) has been updated too From 23428c1168c4f2d5161bac576bc699069c06cda6 Mon Sep 17 00:00:00 2001 From: Mac User Date: Sun, 10 May 2020 20:37:48 -0400 Subject: [PATCH 187/329] update --- cnn_class/extra_reading.txt | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cnn_class/extra_reading.txt b/cnn_class/extra_reading.txt index e0178415..c7cc13b8 100644 --- a/cnn_class/extra_reading.txt +++ b/cnn_class/extra_reading.txt @@ -5,4 +5,13 @@ ImageNet Classification with Deep Convolutional Neural Networks https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf Convolution 
arithmetic tutorial -http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html \ No newline at end of file +http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html + +Very Deep Convolutional Networks for Large-Scale Visual Recognition +http://www.robots.ox.ac.uk/~vgg/research/very_deep/ + +ImageNet Classification with Deep Convolutional Neural Networks +http://image-net.org/challenges/LSVRC/2012/supervision.pdf + +Going deeper with convolutions +https://arxiv.org/pdf/1409.4842.pdf \ No newline at end of file From b2023952ee81ac219cb1441bdc1a9c8594cb6d5e Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 17 May 2020 12:43:56 -0400 Subject: [PATCH 188/329] update --- tf2.0/plot_rl_rewards.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 tf2.0/plot_rl_rewards.py diff --git a/tf2.0/plot_rl_rewards.py b/tf2.0/plot_rl_rewards.py new file mode 100644 index 00000000..85cc1b2e --- /dev/null +++ b/tf2.0/plot_rl_rewards.py @@ -0,0 +1,16 @@ +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') +args = parser.parse_args() + +a = np.load(f'rl_trader_rewards/{args.mode}.npy') + +print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") + +plt.hist(a, bins=20) +plt.title(args.mode) +plt.show() \ No newline at end of file From 1612de9094fcf625d75ac7da221afbd780dac349 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 27 May 2020 03:56:17 -0400 Subject: [PATCH 189/329] update --- pytorch/rl_trader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch/rl_trader.py b/pytorch/rl_trader.py index fbb96c91..5738c9ac 100644 --- a/pytorch/rl_trader.py +++ b/pytorch/rl_trader.py @@ -322,7 +322,7 @@ def replay(self, batch_size=32): done = minibatch['d'] # Calculate the target: Q(s',a) - target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states), axis=1) + target = rewards + (1 - done) * self.gamma * np.amax(predict(self.model, next_states), axis=1) # With the PyTorch API, it is simplest to have the target be the # same shape as the predictions. 
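For context, the expression in the hunk above is the standard one-step Q-learning target, target = r + (1 - done) * gamma * max_a' Q(s', a'); the one-line change only swaps the Keras-style self.model.predict(...) call for a standalone predict(model, ...) helper (presumably defined elsewhere in pytorch/rl_trader.py). Below is a minimal NumPy sketch of just the target calculation; the function name, array names, and the 0.99 discount are illustrative assumptions, not code taken from rl_trader.py.

import numpy as np

def q_learning_targets(rewards, next_q_values, done, gamma=0.99):
    # target = r                              if the transition ended the episode
    #        = r + gamma * max_a' Q(s', a')   otherwise
    return rewards + (1.0 - done) * gamma * np.amax(next_q_values, axis=1)

# toy batch of size 2: one ongoing transition, one terminal transition
rewards = np.array([1.0, 0.0])
next_q_values = np.array([[0.5, 0.2],
                          [0.1, 0.3]])
done = np.array([0.0, 1.0])
print(q_learning_targets(rewards, next_q_values, done))  # [1.495 0.   ]

Masking with (1 - done) is what prevents bootstrapping past a terminal state, which is exactly the behaviour the diff above preserves.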
From e2a9d460de94806f9ed087f61fd3794dbe3b04ff Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 29 May 2020 16:17:20 -0400 Subject: [PATCH 190/329] update --- ...erative_policy_evaluation_deterministic.py | 111 +++++++++++++++ ...erative_policy_evaluation_probabilistic.py | 112 +++++++++++++++ rl/policy_iteration_deterministic.py | 134 ++++++++++++++++++ rl/policy_iteration_probabilistic.py | 129 +++++++++++++++++ rl/value_iteration.py | 75 ++++++---- 5 files changed, 534 insertions(+), 27 deletions(-) create mode 100644 rl/iterative_policy_evaluation_deterministic.py create mode 100644 rl/iterative_policy_evaluation_probabilistic.py create mode 100644 rl/policy_iteration_deterministic.py create mode 100644 rl/policy_iteration_probabilistic.py diff --git a/rl/iterative_policy_evaluation_deterministic.py b/rl/iterative_policy_evaluation_deterministic.py new file mode 100644 index 00000000..06ddc479 --- /dev/null +++ b/rl/iterative_policy_evaluation_deterministic.py @@ -0,0 +1,111 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import standard_grid, ACTION_SPACE + +SMALL_ENOUGH = 1e-3 # threshold for convergence + + +def print_values(V, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + v = V.get((i,j), 0) + if v >= 0: + print(" %.2f|" % v, end="") + else: + print("%.2f|" % v, end="") # -ve sign takes up an extra space + print("") + + +def print_policy(P, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + a = P.get((i,j), ' ') + print(" %s |" % a, end="") + print("") + + + +if __name__ == '__main__': + + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + grid = standard_grid() + for i in range(grid.rows): + for j in range(grid.cols): + s = (i, j) + if not grid.is_terminal(s): + for a in ACTION_SPACE: + s2 = grid.get_next_state(s, a) + transition_probs[(s, a, s2)] = 1 + if s2 in grid.rewards: + rewards[(s, a, s2)] = grid.rewards[s2] + + ### fixed policy ### + policy = { + (2, 0): 'U', + (1, 0): 'U', + (0, 0): 'R', + (0, 1): 'R', + (0, 2): 'R', + (1, 2): 'U', + (2, 1): 'R', + (2, 2): 'U', + (2, 3): 'L', + } + print_policy(policy, grid) + + # initialize V(s) = 0 + V = {} + for s in grid.all_states(): + V[s] = 0 + + gamma = 0.9 # discount factor + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = 1 if policy.get(s) == a else 0 + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + gamma * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + + print("iter:", it, "biggest_change:", biggest_change) + print_values(V, grid) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + print("\n\n") diff --git a/rl/iterative_policy_evaluation_probabilistic.py b/rl/iterative_policy_evaluation_probabilistic.py new file mode 100644 index 00000000..07d019c0 --- /dev/null +++ b/rl/iterative_policy_evaluation_probabilistic.py @@ -0,0 +1,112 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import windy_grid, ACTION_SPACE + +SMALL_ENOUGH = 1e-3 # threshold for convergence + + +def print_values(V, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + v = V.get((i,j), 0) + if v >= 0: + print(" %.2f|" % v, end="") + else: + print("%.2f|" % v, end="") # -ve sign takes up an extra space + print("") + + +def print_policy(P, g): + for i in range(g.rows): + print("---------------------------") + for j in range(g.cols): + a = P.get((i,j), ' ') + print(" %s |" % a, end="") + print("") + + + +if __name__ == '__main__': + + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + # we can take this from the grid object and convert it to the format we want + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + grid = windy_grid() + for (s, a), v in grid.probs.items(): + for s2, p in v.items(): + transition_probs[(s, a, s2)] = p + rewards[(s, a, s2)] = grid.rewards.get(s2, 0) + + ### probabilistic policy ### + policy = { + (2, 0): {'U': 0.5, 'R': 0.5}, + (1, 0): {'U': 1.0}, + (0, 0): {'R': 1.0}, + (0, 1): {'R': 1.0}, + (0, 2): {'R': 1.0}, + (1, 2): {'U': 1.0}, + (2, 1): {'R': 1.0}, + (2, 2): {'U': 1.0}, + (2, 3): {'L': 1.0}, + } + print_policy(policy, grid) + + # initialize V(s) = 0 + V = {} + for s in grid.all_states(): + V[s] = 0 + + gamma = 0.9 # discount factor + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = policy[s].get(a, 0) + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + gamma * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + + print("iter:", it, "biggest_change:", biggest_change) + print_values(V, grid) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + print("V:", V) + print("\n\n") + + # sanity check + # at state (1, 2), value is 0.5 * 0.9 * 1 + 0.5 * (-1) = -0.05 + diff --git a/rl/policy_iteration_deterministic.py b/rl/policy_iteration_deterministic.py new file mode 100644 index 00000000..f751c428 --- /dev/null +++ b/rl/policy_iteration_deterministic.py @@ -0,0 +1,134 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import standard_grid, ACTION_SPACE +from iterative_policy_evaluation import print_values, print_policy + +SMALL_ENOUGH = 1e-3 +GAMMA = 0.9 + + +# copied from iterative_policy_evaluation +def get_transition_probs_and_rewards(grid): + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + for i in range(grid.rows): + for j in range(grid.cols): + s = (i, j) + if not grid.is_terminal(s): + for a in ACTION_SPACE: + s2 = grid.get_next_state(s, a) + transition_probs[(s, a, s2)] = 1 + if s2 in grid.rewards: + rewards[(s, a, s2)] = grid.rewards[s2] + + return transition_probs, rewards + + +def evaluate_deterministic_policy(grid, policy): + # initialize V(s) = 0 + V = {} + for s in grid.all_states(): + V[s] = 0 + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = 1 if policy.get(s) == a else 0 + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + return V + + +if __name__ == '__main__': + + grid = standard_grid() + transition_probs, rewards = get_transition_probs_and_rewards(grid) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + # we'll randomly choose an action and update as we learn + policy = {} + for s in grid.actions.keys(): + policy[s] = np.random.choice(ACTION_SPACE) + + # initial policy + print("initial policy:") + print_policy(policy, grid) + + # repeat until convergence - will break out when policy does not change + while True: + + # policy evaluation step - we already know how to do this! 
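+    # policy iteration alternates two steps: evaluate the current policy to get V,
+    # then act greedily with respect to V below; once the greedy policy stops
+    # changing, the policy (and V) is optimal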
+ V = evaluate_deterministic_policy(grid, policy) + + # policy improvement step + is_policy_converged = True + for s in grid.actions.keys(): + old_a = policy[s] + new_a = None + best_value = float('-inf') + + # loop through all possible actions to find the best current action + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + if v > best_value: + best_value = v + new_a = a + + # new_a now represents the best action in this state + policy[s] = new_a + if new_a != old_a: + is_policy_converged = False + + if is_policy_converged: + break + + # once we're done, print the final policy and values + print("values:") + print_values(V, grid) + print("policy:") + print_policy(policy, grid) diff --git a/rl/policy_iteration_probabilistic.py b/rl/policy_iteration_probabilistic.py new file mode 100644 index 00000000..c46bc8e2 --- /dev/null +++ b/rl/policy_iteration_probabilistic.py @@ -0,0 +1,129 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import windy_grid, ACTION_SPACE +from iterative_policy_evaluation import print_values, print_policy + +SMALL_ENOUGH = 1e-3 +GAMMA = 0.9 + + +# copied from iterative_policy_evaluation +def get_transition_probs_and_rewards(grid): + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. 
probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + for (s, a), v in grid.probs.items(): + for s2, p in v.items(): + transition_probs[(s, a, s2)] = p + rewards[(s, a, s2)] = grid.rewards.get(s2, 0) + + return transition_probs, rewards + + +def evaluate_deterministic_policy(grid, policy): + # initialize V(s) = 0 + V = {} + for s in grid.all_states(): + V[s] = 0 + + # repeat until convergence + it = 0 + while True: + biggest_change = 0 + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] + new_v = 0 # we will accumulate the answer + for a in ACTION_SPACE: + for s2 in grid.all_states(): + + # action probability is deterministic + action_prob = 1 if policy.get(s) == a else 0 + + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # after done getting the new value, update the value table + V[s] = new_v + biggest_change = max(biggest_change, np.abs(old_v - V[s])) + it += 1 + + if biggest_change < SMALL_ENOUGH: + break + return V + + +if __name__ == '__main__': + + grid = windy_grid() + transition_probs, rewards = get_transition_probs_and_rewards(grid) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + # we'll randomly choose an action and update as we learn + policy = {} + for s in grid.actions.keys(): + policy[s] = np.random.choice(ACTION_SPACE) + + # initial policy + print("initial policy:") + print_policy(policy, grid) + + # repeat until convergence - will break out when policy does not change + while True: + + # policy evaluation step - we already know how to do this! 
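+    # note: the policy itself is still deterministic; only the environment (the
+    # windy grid's p(s'|s,a)) is stochastic, which is why
+    # evaluate_deterministic_policy can be reused unchanged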
+ V = evaluate_deterministic_policy(grid, policy) + + # policy improvement step + is_policy_converged = True + for s in grid.actions.keys(): + old_a = policy[s] + new_a = None + best_value = float('-inf') + + # loop through all possible actions to find the best current action + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + if v > best_value: + best_value = v + new_a = a + + # new_a now represents the best action in this state + policy[s] = new_a + if new_a != old_a: + is_policy_converged = False + + if is_policy_converged: + break + + # once we're done, print the final policy and values + print("values:") + print_values(V, grid) + print("policy:") + print_policy(policy, grid) diff --git a/rl/value_iteration.py b/rl/value_iteration.py index 6367ec6f..349288de 100644 --- a/rl/value_iteration.py +++ b/rl/value_iteration.py @@ -7,20 +7,36 @@ import numpy as np -from grid_world import standard_grid, negative_grid +from grid_world import windy_grid, ACTION_SPACE from iterative_policy_evaluation import print_values, print_policy SMALL_ENOUGH = 1e-3 GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') -# this is deterministic -# all p(s',r|s,a) = 1 or 0 +# copied from iterative_policy_evaluation +def get_transition_probs_and_rewards(grid): + ### define transition probabilities and grid ### + # the key is (s, a, s'), the value is the probability + # that is, transition_probs[(s, a, s')] = p(s' | s, a) + # any key NOT present will considered to be impossible (i.e. probability 0) + transition_probs = {} + + # to reduce the dimensionality of the dictionary, we'll use deterministic + # rewards, r(s, a, s') + # note: you could make it simpler by using r(s') since the reward doesn't + # actually depend on (s, a) + rewards = {} + + for (s, a), v in grid.probs.items(): + for s2, p in v.items(): + transition_probs[(s, a, s2)] = p + rewards[(s, a, s2)] = grid.rewards.get(s2, 0) + + return transition_probs, rewards if __name__ == '__main__': - # this grid gives you a reward of -0.1 for every non-terminal state - # we want to see if this will encourage finding a shorter path to the goal - grid = negative_grid() + grid = windy_grid() + transition_probs, rewards = get_transition_probs_and_rewards(grid) # print rewards print("rewards:") @@ -30,7 +46,7 @@ # we'll randomly choose an action and update as we learn policy = {} for s in grid.actions.keys(): - policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS) + policy[s] = np.random.choice(ACTION_SPACE) # initial policy print("initial policy:") @@ -40,32 +56,33 @@ V = {} states = grid.all_states() for s in states: - # V[s] = 0 - if s in grid.actions: - V[s] = np.random.random() - else: - # terminal state - V[s] = 0 + V[s] = 0 # repeat until convergence # V[s] = max[a]{ sum[s',r] { p(s',r|s,a)[r + gamma*V[s']] } } + it = 0 while True: biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in policy: + for s in grid.all_states(): + if not grid.is_terminal(s): + old_v = V[s] new_v = float('-inf') - for a in ALL_POSSIBLE_ACTIONS: - grid.set_state(s) - r = grid.move(a) - v = r + GAMMA * V[grid.current_state()] + + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # 
keep v if it's better if v > new_v: new_v = v + V[s] = new_v biggest_change = max(biggest_change, np.abs(old_v - V[s])) + it += 1 if biggest_change < SMALL_ENOUGH: break @@ -74,10 +91,14 @@ best_a = None best_value = float('-inf') # loop through all possible actions to find the best current action - for a in ALL_POSSIBLE_ACTIONS: - grid.set_state(s) - r = grid.move(a) - v = r + GAMMA * V[grid.current_state()] + for a in ACTION_SPACE: + v = 0 + for s2 in grid.all_states(): + # reward is a function of (s, a, s'), 0 if not specified + r = rewards.get((s, a, s2), 0) + v += transition_probs.get((s, a, s2), 0) * (r + GAMMA * V[s2]) + + # best_a is the action associated with best_value if v > best_value: best_value = v best_a = a From 7f68dd31b7d11796f97d2028608bae5909d52938 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 29 May 2020 16:53:35 -0400 Subject: [PATCH 191/329] update --- rl/grid_world.py | 170 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 169 insertions(+), 1 deletion(-) diff --git a/rl/grid_world.py b/rl/grid_world.py index 891b5441..35c08828 100644 --- a/rl/grid_world.py +++ b/rl/grid_world.py @@ -5,10 +5,12 @@ # Note: you may need to update your version of future # sudo pip install -U future - import numpy as np +ACTION_SPACE = ('U', 'D', 'L', 'R') + + class Grid: # Environment def __init__(self, rows, cols, start): self.rows = rows @@ -32,6 +34,22 @@ def current_state(self): def is_terminal(self, s): return s not in self.actions + def get_next_state(self, s, a): + # this answers: where would I end up if I perform action 'a' in state 's'? + i, j = s[0], s[1] + + # if this action moves you somewhere else, then it will be in this dictionary + if a in self.actions[(i, j)]: + if a == 'U': + i -= 1 + elif a == 'D': + i += 1 + elif a == 'R': + j += 1 + elif a == 'L': + j -= 1 + return i, j + def move(self, action): # check if legal move first if action in self.actions[(self.i, self.j)]: @@ -116,3 +134,153 @@ def negative_grid(step_cost=-0.1): }) return g + + + + +class WindyGrid: + def __init__(self, rows, cols, start): + self.rows = rows + self.cols = cols + self.i = start[0] + self.j = start[1] + + def set(self, rewards, actions, probs): + # rewards should be a dict of: (i, j): r (row, col): reward + # actions should be a dict of: (i, j): A (row, col): list of possible actions + self.rewards = rewards + self.actions = actions + self.probs = probs + + def set_state(self, s): + self.i = s[0] + self.j = s[1] + + def current_state(self): + return (self.i, self.j) + + def is_terminal(self, s): + return s not in self.actions + + def move(self, action): + s = (self.i, self.j) + a = action + + next_state_probs = self.probs[(s, a)] + next_states = list(next_state_probs.keys()) + next_probs = list(next_state_probs.values()) + s2 = np.random.choice(next_states, p=next_probs) + + # update the current state + self.i, self.j = s2 + + # return a reward (if any) + return self.rewards.get(s2, 0) + + def game_over(self): + # returns true if game is over, else false + # true if we are in a state where no actions are possible + return (self.i, self.j) not in self.actions + + def all_states(self): + # possibly buggy but simple way to get all states + # either a position that has possible next actions + # or a position that yields a reward + return set(self.actions.keys()) | set(self.rewards.keys()) + + +def windy_grid(): + g = WindyGrid(3, 4, (2, 0)) + rewards = {(0, 3): 1, (1, 3): -1} + actions = { + (0, 0): ('D', 'R'), + (0, 1): ('L', 'R'), + (0, 2): ('L', 'D', 'R'), + (1, 0): ('U', 
'D'), + (1, 2): ('U', 'D', 'R'), + (2, 0): ('U', 'R'), + (2, 1): ('L', 'R'), + (2, 2): ('L', 'R', 'U'), + (2, 3): ('L', 'U'), + } + + # p(s' | s, a) represented as: + # KEY: (s, a) --> VALUE: {s': p(s' | s, a)} + probs = { + ((2, 0), 'U'): {(1, 0): 1.0}, + ((2, 0), 'D'): {(2, 0): 1.0}, + ((2, 0), 'L'): {(2, 0): 1.0}, + ((2, 0), 'R'): {(2, 1): 1.0}, + ((1, 0), 'U'): {(0, 0): 1.0}, + ((1, 0), 'D'): {(2, 0): 1.0}, + ((1, 0), 'L'): {(1, 0): 1.0}, + ((1, 0), 'R'): {(1, 0): 1.0}, + ((0, 0), 'U'): {(0, 0): 1.0}, + ((0, 0), 'D'): {(1, 0): 1.0}, + ((0, 0), 'L'): {(0, 0): 1.0}, + ((0, 0), 'R'): {(0, 1): 1.0}, + ((0, 1), 'U'): {(0, 1): 1.0}, + ((0, 1), 'D'): {(0, 1): 1.0}, + ((0, 1), 'L'): {(0, 0): 1.0}, + ((0, 1), 'R'): {(0, 2): 1.0}, + ((0, 2), 'U'): {(0, 2): 1.0}, + ((0, 2), 'D'): {(1, 2): 1.0}, + ((0, 2), 'L'): {(0, 1): 1.0}, + ((0, 2), 'R'): {(0, 3): 1.0}, + ((2, 1), 'U'): {(2, 1): 1.0}, + ((2, 1), 'D'): {(2, 1): 1.0}, + ((2, 1), 'L'): {(2, 0): 1.0}, + ((2, 1), 'R'): {(2, 2): 1.0}, + ((2, 2), 'U'): {(1, 2): 1.0}, + ((2, 2), 'D'): {(2, 2): 1.0}, + ((2, 2), 'L'): {(2, 1): 1.0}, + ((2, 2), 'R'): {(2, 3): 1.0}, + ((2, 3), 'U'): {(1, 3): 1.0}, + ((2, 3), 'D'): {(2, 3): 1.0}, + ((2, 3), 'L'): {(2, 2): 1.0}, + ((2, 3), 'R'): {(2, 3): 1.0}, + ((1, 2), 'U'): {(0, 2): 0.5, (1, 3): 0.5}, + ((1, 2), 'D'): {(2, 2): 1.0}, + ((1, 2), 'L'): {(1, 2): 1.0}, + ((1, 2), 'R'): {(1, 3): 1.0}, + } + g.set(rewards, actions, probs) + return g + + + + +def grid_5x5(step_cost=-0.1): + g = Grid(5, 5, (4, 0)) + rewards = {(0, 4): 1, (1, 4): -1} + actions = { + (0, 0): ('D', 'R'), + (0, 1): ('L', 'R'), + (0, 2): ('L', 'R'), + (0, 3): ('L', 'D', 'R'), + (1, 0): ('U', 'D', 'R'), + (1, 1): ('U', 'D', 'L'), + (1, 3): ('U', 'D', 'R'), + (2, 0): ('U', 'D', 'R'), + (2, 1): ('U', 'L', 'R'), + (2, 2): ('L', 'R', 'D'), + (2, 3): ('L', 'R', 'U'), + (2, 4): ('L', 'U', 'D'), + (3, 0): ('U', 'D'), + (3, 2): ('U', 'D'), + (3, 4): ('U', 'D'), + (4, 0): ('U', 'R'), + (4, 1): ('L', 'R'), + (4, 2): ('L', 'R', 'U'), + (4, 3): ('L', 'R'), + (4, 4): ('L', 'U'), + } + g.set(rewards, actions) + + # non-terminal states + visitable_states = actions.keys() + for s in visitable_states: + g.rewards[s] = step_cost + + return g + From 97c975ebc00974acb988a6f2e7e3b2e6d2f51e2b Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 3 Jun 2020 22:44:04 -0400 Subject: [PATCH 192/329] update --- rl/grid_world.py | 71 ++++++++++++++++++++++++++++ rl/policy_iteration_deterministic.py | 2 +- rl/policy_iteration_probabilistic.py | 5 +- rl/value_iteration.py | 13 +---- 4 files changed, 77 insertions(+), 14 deletions(-) diff --git a/rl/grid_world.py b/rl/grid_world.py index 35c08828..87955e13 100644 --- a/rl/grid_world.py +++ b/rl/grid_world.py @@ -249,6 +249,77 @@ def windy_grid(): +def windy_grid_penalized(step_cost=-0.1): + g = WindyGrid(3, 4, (2, 0)) + rewards = { + (0, 0): step_cost, + (0, 1): step_cost, + (0, 2): step_cost, + (1, 0): step_cost, + (1, 2): step_cost, + (2, 0): step_cost, + (2, 1): step_cost, + (2, 2): step_cost, + (2, 3): step_cost, + (0, 3): 1, + (1, 3): -1 + } + actions = { + (0, 0): ('D', 'R'), + (0, 1): ('L', 'R'), + (0, 2): ('L', 'D', 'R'), + (1, 0): ('U', 'D'), + (1, 2): ('U', 'D', 'R'), + (2, 0): ('U', 'R'), + (2, 1): ('L', 'R'), + (2, 2): ('L', 'R', 'U'), + (2, 3): ('L', 'U'), + } + + # p(s' | s, a) represented as: + # KEY: (s, a) --> VALUE: {s': p(s' | s, a)} + probs = { + ((2, 0), 'U'): {(1, 0): 1.0}, + ((2, 0), 'D'): {(2, 0): 1.0}, + ((2, 0), 'L'): {(2, 0): 1.0}, + ((2, 0), 'R'): {(2, 1): 1.0}, + ((1, 0), 'U'): {(0, 0): 1.0}, + ((1, 0), 'D'): 
{(2, 0): 1.0}, + ((1, 0), 'L'): {(1, 0): 1.0}, + ((1, 0), 'R'): {(1, 0): 1.0}, + ((0, 0), 'U'): {(0, 0): 1.0}, + ((0, 0), 'D'): {(1, 0): 1.0}, + ((0, 0), 'L'): {(0, 0): 1.0}, + ((0, 0), 'R'): {(0, 1): 1.0}, + ((0, 1), 'U'): {(0, 1): 1.0}, + ((0, 1), 'D'): {(0, 1): 1.0}, + ((0, 1), 'L'): {(0, 0): 1.0}, + ((0, 1), 'R'): {(0, 2): 1.0}, + ((0, 2), 'U'): {(0, 2): 1.0}, + ((0, 2), 'D'): {(1, 2): 1.0}, + ((0, 2), 'L'): {(0, 1): 1.0}, + ((0, 2), 'R'): {(0, 3): 1.0}, + ((2, 1), 'U'): {(2, 1): 1.0}, + ((2, 1), 'D'): {(2, 1): 1.0}, + ((2, 1), 'L'): {(2, 0): 1.0}, + ((2, 1), 'R'): {(2, 2): 1.0}, + ((2, 2), 'U'): {(1, 2): 1.0}, + ((2, 2), 'D'): {(2, 2): 1.0}, + ((2, 2), 'L'): {(2, 1): 1.0}, + ((2, 2), 'R'): {(2, 3): 1.0}, + ((2, 3), 'U'): {(1, 3): 1.0}, + ((2, 3), 'D'): {(2, 3): 1.0}, + ((2, 3), 'L'): {(2, 2): 1.0}, + ((2, 3), 'R'): {(2, 3): 1.0}, + ((1, 2), 'U'): {(0, 2): 0.5, (1, 3): 0.5}, + ((1, 2), 'D'): {(2, 2): 1.0}, + ((1, 2), 'L'): {(1, 2): 1.0}, + ((1, 2), 'R'): {(1, 3): 1.0}, + } + g.set(rewards, actions, probs) + return g + + def grid_5x5(step_cost=-0.1): g = Grid(5, 5, (4, 0)) diff --git a/rl/policy_iteration_deterministic.py b/rl/policy_iteration_deterministic.py index f751c428..be552a28 100644 --- a/rl/policy_iteration_deterministic.py +++ b/rl/policy_iteration_deterministic.py @@ -8,7 +8,7 @@ import numpy as np from grid_world import standard_grid, ACTION_SPACE -from iterative_policy_evaluation import print_values, print_policy +from iterative_policy_evaluation_deterministic import print_values, print_policy SMALL_ENOUGH = 1e-3 GAMMA = 0.9 diff --git a/rl/policy_iteration_probabilistic.py b/rl/policy_iteration_probabilistic.py index c46bc8e2..cbfca297 100644 --- a/rl/policy_iteration_probabilistic.py +++ b/rl/policy_iteration_probabilistic.py @@ -7,7 +7,7 @@ import numpy as np -from grid_world import windy_grid, ACTION_SPACE +from grid_world import windy_grid, windy_grid_penalized, ACTION_SPACE from iterative_policy_evaluation import print_values, print_policy SMALL_ENOUGH = 1e-3 @@ -72,7 +72,8 @@ def evaluate_deterministic_policy(grid, policy): if __name__ == '__main__': - grid = windy_grid() + grid = windy_grid_penalized(-2) + # grid = windy_grid() transition_probs, rewards = get_transition_probs_and_rewards(grid) # print rewards diff --git a/rl/value_iteration.py b/rl/value_iteration.py index 349288de..8f6738d2 100644 --- a/rl/value_iteration.py +++ b/rl/value_iteration.py @@ -42,16 +42,6 @@ def get_transition_probs_and_rewards(grid): print("rewards:") print_values(grid.rewards, grid) - # state -> action - # we'll randomly choose an action and update as we learn - policy = {} - for s in grid.actions.keys(): - policy[s] = np.random.choice(ACTION_SPACE) - - # initial policy - print("initial policy:") - print_policy(policy, grid) - # initialize V(s) V = {} states = grid.all_states() @@ -87,7 +77,8 @@ def get_transition_probs_and_rewards(grid): break # find a policy that leads to optimal value function - for s in policy.keys(): + policy = {} + for s in grid.actions.keys(): best_a = None best_value = float('-inf') # loop through all possible actions to find the best current action From 42cd79292b8fd1e02feea8e001b04fcc31c40c3d Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 18 Jun 2020 11:56:19 -0400 Subject: [PATCH 193/329] update --- cnn_class/cnn_tf.py | 1 - cnn_class/cnn_tf_plot_filters.py | 1 - cnn_class/cnn_theano.py | 5 ++--- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/cnn_class/cnn_tf.py b/cnn_class/cnn_tf.py index ec768d75..284910e8 100644 --- a/cnn_class/cnn_tf.py 
+++ b/cnn_class/cnn_tf.py @@ -32,7 +32,6 @@ def convpool(X, W, b): def init_filter(shape, poolsz): - # w = np.random.randn(*shape) * np.sqrt(2) / np.sqrt(np.prod(shape[:-1]) + shape[-1]*np.prod(shape[:-2]) / np.prod(poolsz)) w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[:-1])) return w.astype(np.float32) diff --git a/cnn_class/cnn_tf_plot_filters.py b/cnn_class/cnn_tf_plot_filters.py index 8be13efe..8ab88671 100644 --- a/cnn_class/cnn_tf_plot_filters.py +++ b/cnn_class/cnn_tf_plot_filters.py @@ -32,7 +32,6 @@ def convpool(X, W, b): def init_filter(shape, poolsz): - # w = np.random.randn(*shape) * np.sqrt(2) / np.sqrt(np.prod(shape[:-1]) + shape[-1]*np.prod(shape[:-2]) / np.prod(poolsz)) w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[:-1])) return w.astype(np.float32) diff --git a/cnn_class/cnn_theano.py b/cnn_class/cnn_theano.py index 311577d6..41a297b9 100644 --- a/cnn_class/cnn_theano.py +++ b/cnn_class/cnn_theano.py @@ -44,7 +44,6 @@ def convpool(X, W, b, poolsize=(2, 2)): def init_filter(shape, poolsz): - # w = np.random.randn(*shape) / np.sqrt(np.prod(shape[1:]) + shape[0]*np.prod(shape[2:]) / np.prod(poolsz)) w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[1:])) return w.astype(np.float32) @@ -82,8 +81,8 @@ def main(): max_iter = 6 print_period = 10 - lr = np.float32(1e-2) - mu = np.float32(0.99) + lr = np.float32(1e-3) + mu = np.float32(0.9) N = Xtrain.shape[0] batch_sz = 500 From c14fd76c4cc3133f1c670dd83455d8d52a0e25b7 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 18 Jun 2020 14:10:15 -0400 Subject: [PATCH 194/329] update --- nlp_class3/attention.py | 10 +++++++--- nlp_class3/bilstm_mnist.py | 10 +++++++--- nlp_class3/bilstm_test.py | 11 +++++++---- nlp_class3/poetry.py | 11 +++++++---- nlp_class3/simple_rnn_test.py | 11 +++++++---- nlp_class3/wseq2seq.py | 11 +++++++---- 6 files changed, 42 insertions(+), 22 deletions(-) diff --git a/nlp_class3/attention.py b/nlp_class3/attention.py index 6a8cd19f..19ba3066 100644 --- a/nlp_class3/attention.py +++ b/nlp_class3/attention.py @@ -16,9 +16,13 @@ import numpy as np import matplotlib.pyplot as plt -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass # make sure we do softmax over the time axis diff --git a/nlp_class3/bilstm_mnist.py b/nlp_class3/bilstm_mnist.py index 03e3752d..3da99920 100644 --- a/nlp_class3/bilstm_mnist.py +++ b/nlp_class3/bilstm_mnist.py @@ -13,9 +13,13 @@ import pandas as pd import matplotlib.pyplot as plt -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass def get_mnist(limit=None): diff --git a/nlp_class3/bilstm_test.py b/nlp_class3/bilstm_test.py index 800902f9..2f31b489 100644 --- a/nlp_class3/bilstm_test.py +++ b/nlp_class3/bilstm_test.py @@ -9,10 +9,13 @@ import numpy as np import matplotlib.pyplot as plt -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + 
if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass T = 8 diff --git a/nlp_class3/poetry.py b/nlp_class3/poetry.py index 7dabafe3..74fac502 100644 --- a/nlp_class3/poetry.py +++ b/nlp_class3/poetry.py @@ -17,10 +17,13 @@ from keras.preprocessing.sequence import pad_sequences from keras.optimizers import Adam, SGD -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass # some configuration diff --git a/nlp_class3/simple_rnn_test.py b/nlp_class3/simple_rnn_test.py index b67a4b29..e6f84430 100644 --- a/nlp_class3/simple_rnn_test.py +++ b/nlp_class3/simple_rnn_test.py @@ -9,10 +9,13 @@ import numpy as np import matplotlib.pyplot as plt -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass T = 8 diff --git a/nlp_class3/wseq2seq.py b/nlp_class3/wseq2seq.py index cec612cd..0f2e1c70 100644 --- a/nlp_class3/wseq2seq.py +++ b/nlp_class3/wseq2seq.py @@ -15,10 +15,13 @@ import numpy as np import matplotlib.pyplot as plt -import keras.backend as K -if len(K.tensorflow_backend._get_available_gpus()) > 0: - from keras.layers import CuDNNLSTM as LSTM - from keras.layers import CuDNNGRU as GRU +try: + import keras.backend as K + if len(K.tensorflow_backend._get_available_gpus()) > 0: + from keras.layers import CuDNNLSTM as LSTM + from keras.layers import CuDNNGRU as GRU +except: + pass # some config From 069b5d4a5f8a9bf1df689991f4d6c9d6a9bdce14 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 21 Jun 2020 19:04:26 -0400 Subject: [PATCH 195/329] update --- unsupervised_class/neural_kmeans.py | 44 +++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 unsupervised_class/neural_kmeans.py diff --git a/unsupervised_class/neural_kmeans.py b/unsupervised_class/neural_kmeans.py new file mode 100644 index 00000000..f5f71b05 --- /dev/null +++ b/unsupervised_class/neural_kmeans.py @@ -0,0 +1,44 @@ +import numpy as np +import matplotlib.pyplot as plt +from kmeans import get_simple_data +from sklearn.preprocessing import StandardScaler + + +X = get_simple_data() +scaler = StandardScaler() +X = scaler.fit_transform(X) + +N, D = X.shape +K = 3 + +W = np.random.randn(D, K) + + +n_epochs = 100 +learning_rate = 0.001 +losses = [] + + +for i in range(n_epochs): + loss = 0 + for j in range(N): + h = W.T.dot(X[j]) # K-length vector + k = np.argmax(h) # winning neuron + + # accumulate loss + loss += (W[:,k] - X[j]).dot(W[:,k] - X[j]) + + # weight update + W[:,k] += learning_rate * (X[j] - W[:,k]) + + losses.append(loss) + + +# plot losses +plt.plot(losses) +plt.show() + +# show cluster assignments +H = np.argmax(X.dot(W), axis=1) +plt.scatter(X[:,0], X[:,1], c=H, alpha=0.5) +plt.show() From 4e5e20e494fc0e55f84e82acb9cac1e7b9f0306e Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 21 Jun 2020 21:24:45 -0400 Subject: [PATCH 196/329] add some comments --- unsupervised_class/neural_kmeans.py | 
7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/unsupervised_class/neural_kmeans.py b/unsupervised_class/neural_kmeans.py index f5f71b05..5e9a2d20 100644 --- a/unsupervised_class/neural_kmeans.py +++ b/unsupervised_class/neural_kmeans.py @@ -4,21 +4,24 @@ from sklearn.preprocessing import StandardScaler +# get the data and standardize it X = get_simple_data() scaler = StandardScaler() X = scaler.fit_transform(X) +# get shapes N, D = X.shape K = 3 +# initialize parameters W = np.random.randn(D, K) - +# set hyperparameters n_epochs = 100 learning_rate = 0.001 losses = [] - +# training loop for i in range(n_epochs): loss = 0 for j in range(N): From 7c4956612fa2460d499542cd98f4519090ef93a1 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 22 Jun 2020 13:03:43 -0400 Subject: [PATCH 197/329] update --- ann_class2/batch_norm_theano.py | 22 ++++++++++------- ann_class2/dropout_theano.py | 42 ++++++++++++--------------------- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/ann_class2/batch_norm_theano.py b/ann_class2/batch_norm_theano.py index cfb9d999..c86c11a9 100644 --- a/ann_class2/batch_norm_theano.py +++ b/ann_class2/batch_norm_theano.py @@ -95,6 +95,19 @@ def forward(self, X): return self.f(X.dot(self.W) + self.b) +def momentum_updates(cost, params, lr, mu): + grads = T.grad(cost, params) + updates = [] + + for p, g in zip(params, grads): + dp = theano.shared(p.get_value() * 0) + new_dp = mu*dp - lr*g + new_p = p + new_dp + updates.append((dp, new_dp)) + updates.append((p, new_p)) + return updates + + class ANN(object): def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes @@ -125,9 +138,6 @@ def fit(self, X, Y, Xtest, Ytest, activation=T.nnet.relu, learning_rate=1e-2, mu for h in self.layers: self.params += h.params - # for momentum - dparams = [theano.shared(np.zeros_like(p.get_value())) for p in self.params] - # note! 
we will need to build the output differently # for train and test (prediction) @@ -143,11 +153,7 @@ def fit(self, X, Y, Xtest, Ytest, activation=T.nnet.relu, learning_rate=1e-2, mu grads = T.grad(cost, self.params) # momentum only - updates = [ - (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads) - ] + [ - (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads) - ] + updates = momentum_updates(cost, self.params, learning_rate, mu) for layer in self.layers[:-1]: updates += layer.running_update diff --git a/ann_class2/dropout_theano.py b/ann_class2/dropout_theano.py index 81e5bebe..61486626 100644 --- a/ann_class2/dropout_theano.py +++ b/ann_class2/dropout_theano.py @@ -19,6 +19,19 @@ from sklearn.utils import shuffle +def momentum_updates(cost, params, lr, mu): + grads = T.grad(cost, params) + updates = [] + + for p, g in zip(params, grads): + dp = theano.shared(p.get_value() * 0) + new_dp = mu*dp - lr*g + new_p = p + new_dp + updates.append((dp, new_dp)) + updates.append((p, new_p)) + return updates + + class HiddenLayer(object): def __init__(self, M1, M2, an_id): self.id = an_id @@ -39,7 +52,7 @@ def __init__(self, hidden_layer_sizes, p_keep): self.hidden_layer_sizes = hidden_layer_sizes self.dropout_rates = p_keep - def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=100, show_fig=False): + def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-2, mu=0.9, decay=0.9, epochs=10, batch_sz=100, show_fig=False): X = X.astype(np.float32) Y = Y.astype(np.int32) Xvalid = Xvalid.astype(np.float32) @@ -75,32 +88,7 @@ def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-4, mu=0.9, decay=0.9, epoch # this cost is for training cost = -T.mean(T.log(pY_train[T.arange(thY.shape[0]), thY])) - - # gradients wrt each param - grads = T.grad(cost, self.params) - - # for momentum - dparams = [theano.shared(np.zeros_like(p.get_value())) for p in self.params] - - # for rmsprop - cache = [theano.shared(np.ones_like(p.get_value())) for p in self.params] - - new_cache = [decay*c + (1-decay)*g*g for p, c, g in zip(self.params, cache, grads)] - new_dparams = [mu*dp - learning_rate*g/T.sqrt(new_c + 1e-10) for p, new_c, dp, g in zip(self.params, new_cache, dparams, grads)] - updates = [ - (c, new_c) for c, new_c in zip(cache, new_cache) - ] + [ - (dp, new_dp) for dp, new_dp in zip(dparams, new_dparams) - ] + [ - (p, p + new_dp) for p, new_dp in zip(self.params, new_dparams) - ] - - # momentum only - # updates = [ - # (p, p + mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, dparams) - # ] + [ - # (dp, mu*dp - learning_rate*T.grad(cost, p)) for p, dp in zip(self.params, dparams) - # ] + updates = momentum_updates(cost, self.params, learning_rate, mu) train_op = theano.function( inputs=[thX, thY], From 2799cf0d84189a2b460029163bfef86c79eb0d9c Mon Sep 17 00:00:00 2001 From: User Date: Mon, 13 Jul 2020 14:02:48 -0400 Subject: [PATCH 198/329] update --- cnn_class2/style_transfer1.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/cnn_class2/style_transfer1.py b/cnn_class2/style_transfer1.py index 46d12d3d..2d26ef2b 100644 --- a/cnn_class2/style_transfer1.py +++ b/cnn_class2/style_transfer1.py @@ -30,15 +30,24 @@ def VGG16_AvgPool(shape): # so get rid of the maxpool which throws away information vgg = VGG16(input_shape=shape, weights='imagenet', include_top=False) - new_model = Sequential() + # new_model = Sequential() + # for layer in vgg.layers: + # if layer.__class__ == 
MaxPooling2D: + # # replace it with average pooling + # new_model.add(AveragePooling2D()) + # else: + # new_model.add(layer) + + i = vgg.input + x = i for layer in vgg.layers: if layer.__class__ == MaxPooling2D: # replace it with average pooling - new_model.add(AveragePooling2D()) + x = AveragePooling2D()(x) else: - new_model.add(layer) + x = layer(x) - return new_model + return Model(i, x) def VGG16_AvgPool_CutOff(shape, num_convs): # there are 13 convolutions in total @@ -50,16 +59,25 @@ def VGG16_AvgPool_CutOff(shape, num_convs): return None model = VGG16_AvgPool(shape) - new_model = Sequential() + # new_model = Sequential() + # n = 0 + # for layer in model.layers: + # if layer.__class__ == Conv2D: + # n += 1 + # new_model.add(layer) + # if n >= num_convs: + # break + n = 0 + output = None for layer in model.layers: if layer.__class__ == Conv2D: n += 1 - new_model.add(layer) if n >= num_convs: + output = layer.output break - return new_model + return Model(model.input, output) def unpreprocess(img): From 8cea470c3b98b8eac42dccf69b766bb72eb3527e Mon Sep 17 00:00:00 2001 From: User Date: Mon, 13 Jul 2020 14:40:26 -0400 Subject: [PATCH 199/329] update --- cnn_class2/style_transfer1.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cnn_class2/style_transfer1.py b/cnn_class2/style_transfer1.py index 2d26ef2b..421a0a19 100644 --- a/cnn_class2/style_transfer1.py +++ b/cnn_class2/style_transfer1.py @@ -24,6 +24,10 @@ from scipy.optimize import fmin_l_bfgs_b +import tensorflow as tf +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + def VGG16_AvgPool(shape): # we want to account for features across the entire image From 7b8b3759d22eca2bf0f79186b5cf2ff259da373e Mon Sep 17 00:00:00 2001 From: User Date: Mon, 3 Aug 2020 00:10:38 -0400 Subject: [PATCH 200/329] update --- ab_testing/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ab_testing/extra_reading.txt b/ab_testing/extra_reading.txt index 668f896a..71360a2e 100644 --- a/ab_testing/extra_reading.txt +++ b/ab_testing/extra_reading.txt @@ -1,3 +1,6 @@ +The Unbiased Estimate of the Covariance Matrix +https://lazyprogrammer.me/covariance-matrix-divide-by-n-or-n-1/ + Algorithms for the multi-armed bandit problem https://www.cs.mcgill.ca/~vkules/bandits.pdf From 663013310b734156c442a0fcd569385247c041dd Mon Sep 17 00:00:00 2001 From: User Date: Sat, 8 Aug 2020 13:47:11 -0400 Subject: [PATCH 201/329] update --- ann_class2/adam.py | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/ann_class2/adam.py b/ann_class2/adam.py index 1ae7813a..3c0243de 100644 --- a/ann_class2/adam.py +++ b/ann_class2/adam.py @@ -105,10 +105,10 @@ def main(): t += 1 # apply updates to the params - W1 = W1 - lr0 * hat_mW1 / np.sqrt(hat_vW1 + eps) - b1 = b1 - lr0 * hat_mb1 / np.sqrt(hat_vb1 + eps) - W2 = W2 - lr0 * hat_mW2 / np.sqrt(hat_vW2 + eps) - b2 = b2 - lr0 * hat_mb2 / np.sqrt(hat_vb2 + eps) + W1 = W1 - lr0 * hat_mW1 / (np.sqrt(hat_vW1) + eps) + b1 = b1 - lr0 * hat_mb1 / (np.sqrt(hat_vb1) + eps) + W2 = W2 - lr0 * hat_mW2 / (np.sqrt(hat_vW2) + eps) + b2 = b2 - lr0 * hat_mb2 / (np.sqrt(hat_vb2) + eps) if j % print_period == 0: @@ -157,25 +157,28 @@ def main(): Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) - # updates + # derivatives gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 - cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 - dW2 = mu * dW2 + (1 - mu) * lr0 * gW2 / (np.sqrt(cache_W2) + eps) - W2 
-= dW2 - gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 - cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 - db2 = mu * db2 + (1 - mu) * lr0 * gb2 / (np.sqrt(cache_b2) + eps) - b2 -= db2 - gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 - cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 - dW1 = mu * dW1 + (1 - mu) * lr0 * gW1 / (np.sqrt(cache_W1) + eps) - W1 -= dW1 - gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + + # caches + cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 + cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 + cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 cache_b1 = decay_rate*cache_b1 + (1 - decay_rate)*gb1*gb1 + + # momentum + dW2 = mu * dW2 + (1 - mu) * lr0 * gW2 / (np.sqrt(cache_W2) + eps) + db2 = mu * db2 + (1 - mu) * lr0 * gb2 / (np.sqrt(cache_b2) + eps) + dW1 = mu * dW1 + (1 - mu) * lr0 * gW1 / (np.sqrt(cache_W1) + eps) db1 = mu * db1 + (1 - mu) * lr0 * gb1 / (np.sqrt(cache_b1) + eps) + + # updates + W2 -= dW2 + b2 -= db2 + W1 -= dW1 b1 -= db1 if j % print_period == 0: From 4ef99d6dcf91693605e62cdb30895d1dc12577d5 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 8 Aug 2020 13:52:49 -0400 Subject: [PATCH 202/329] update --- ann_class2/rmsprop.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/ann_class2/rmsprop.py b/ann_class2/rmsprop.py index 2ec4c4ac..f0bb093e 100644 --- a/ann_class2/rmsprop.py +++ b/ann_class2/rmsprop.py @@ -97,21 +97,22 @@ def main(): pYbatch, Z = forward(Xbatch, W1, b1, W2, b2) # print "first batch cost:", cost(pYbatch, Ybatch) - # updates + # gradients gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2 - cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 - W2 -= lr0 * gW2 / (np.sqrt(cache_W2) + eps) - gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2 - cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 - b2 -= lr0 * gb2 / (np.sqrt(cache_b2) + eps) - gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1 - cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 - W1 -= lr0 * gW1 / (np.sqrt(cache_W1) + eps) - gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1 + + # caches + cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2 + cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2 + cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1 cache_b1 = decay_rate*cache_b1 + (1 - decay_rate)*gb1*gb1 + + # updates + W2 -= lr0 * gW2 / (np.sqrt(cache_W2) + eps) + b2 -= lr0 * gb2 / (np.sqrt(cache_b2) + eps) + W1 -= lr0 * gW1 / (np.sqrt(cache_W1) + eps) b1 -= lr0 * gb1 / (np.sqrt(cache_b1) + eps) if j % print_period == 0: From 9cfa1980c30327b177a0b4e9b849a51d411034da Mon Sep 17 00:00:00 2001 From: User Date: Tue, 18 Aug 2020 03:10:31 -0400 Subject: [PATCH 203/329] update --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e5308696..e42dba50 100644 --- a/README.md +++ b/README.md @@ -19,9 +19,9 @@ I've noticed that many people have out-of-date forks. Thus, I recommend not fork Direct Course Links =================== -PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Apr 2020) -https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP - +PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Aug 2020) +*** note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. 
PYTORCHVIP6, PYTORCHVIP7, etc. +https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP5 Tensorflow 2.0: Deep Learning and Artificial Intelligence (Main Course - special discount link) From 95538d5958e94274a0ff8c73b97e77431821f286 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 18 Aug 2020 03:11:06 -0400 Subject: [PATCH 204/329] update --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index e42dba50..e503449a 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,9 @@ Direct Course Links =================== PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Aug 2020) + *** note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc. + https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP5 Tensorflow 2.0: Deep Learning and Artificial Intelligence From f11b375f3953024c3efe34aa92fd4ca8379d0d79 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 21 Aug 2020 03:02:02 -0400 Subject: [PATCH 205/329] update --- nlp_class2/glove_tf.py | 19 +++++++------ nlp_class2/recursive_tensorflow.py | 21 ++++++++------- nlp_class2/rntn_tensorflow_rnn.py | 39 ++++++++++++++------------- nlp_class2/word2vec_tf.py | 29 +++++++++++--------- recommenders/rbm_tf_k.py | 31 +++++++++++---------- recommenders/rbm_tf_k_faster.py | 35 +++++++++++++----------- unsupervised_class2/autoencoder_tf.py | 33 ++++++++++++----------- unsupervised_class2/rbm_tf.py | 28 +++++++++---------- 8 files changed, 128 insertions(+), 107 deletions(-) diff --git a/nlp_class2/glove_tf.py b/nlp_class2/glove_tf.py index aa8371ad..9db18bb4 100644 --- a/nlp_class2/glove_tf.py +++ b/nlp_class2/glove_tf.py @@ -22,6 +22,9 @@ from rnn_class.util import get_wikipedia_data from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + class Glove: @@ -119,22 +122,22 @@ def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, tfb = tf.Variable(b.reshape(V, 1).astype(np.float32)) tfU = tf.Variable(U.astype(np.float32)) tfc = tf.Variable(c.reshape(1, V).astype(np.float32)) - tfLogX = tf.placeholder(tf.float32, shape=(V, V)) - tffX = tf.placeholder(tf.float32, shape=(V, V)) + tfLogX = tf.compat.v1.placeholder(tf.float32, shape=(V, V)) + tffX = tf.compat.v1.placeholder(tf.float32, shape=(V, V)) - delta = tf.matmul(tfW, tf.transpose(tfU)) + tfb + tfc + mu - tfLogX - cost = tf.reduce_sum(tffX * delta * delta) + delta = tf.matmul(tfW, tf.transpose(a=tfU)) + tfb + tfc + mu - tfLogX + cost = tf.reduce_sum(input_tensor=tffX * delta * delta) regularized_cost = cost for param in (tfW, tfU): - regularized_cost += reg*tf.reduce_sum(param * param) + regularized_cost += reg*tf.reduce_sum(input_tensor=param * param) - train_op = tf.train.MomentumOptimizer( + train_op = tf.compat.v1.train.MomentumOptimizer( learning_rate, momentum=0.9 ).minimize(regularized_cost) # train_op = tf.train.AdamOptimizer(1e-3).minimize(regularized_cost) - init = tf.global_variables_initializer() - session = tf.InteractiveSession() + init = tf.compat.v1.global_variables_initializer() + session = tf.compat.v1.InteractiveSession() session.run(init) costs = [] diff --git a/nlp_class2/recursive_tensorflow.py b/nlp_class2/recursive_tensorflow.py index 4c43e3df..02f02360 100644 --- a/nlp_class2/recursive_tensorflow.py +++ 
b/nlp_class2/recursive_tensorflow.py @@ -17,6 +17,9 @@ from datetime import datetime from util import init_weight, get_ptb_data, display_tree +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + def get_labels(tree): # must be returned in the same order as tree logits are returned @@ -73,22 +76,22 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): cost = self.get_cost(logits, labels, reg) costs.append(cost) - prediction = tf.argmax(logits, 1) + prediction = tf.argmax(input=logits, axis=1) predictions.append(prediction) - train_op = tf.train.MomentumOptimizer(lr, mu).minimize(cost) + train_op = tf.compat.v1.train.MomentumOptimizer(lr, mu).minimize(cost) train_ops.append(train_op) # save for later so we don't have to recompile self.predictions = predictions self.all_labels = all_labels - self.saver = tf.train.Saver() + self.saver = tf.compat.v1.train.Saver() - init = tf.initialize_all_variables() + init = tf.compat.v1.initialize_all_variables() actual_costs = [] per_epoch_costs = [] correct_rates = [] - with tf.Session() as session: + with tf.compat.v1.Session() as session: session.run(init) for i in range(epochs): @@ -136,7 +139,7 @@ def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5): def get_cost(self, logits, labels, reg): cost = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels ) @@ -150,7 +153,7 @@ def get_cost(self, logits, labels, reg): def get_output_recursive(self, tree, list_of_logits, is_root=True): if tree.word is not None: # this is a leaf node - x = tf.nn.embedding_lookup(self.We, [tree.word]) + x = tf.nn.embedding_lookup(params=self.We, ids=[tree.word]) else: # this node has children x1 = self.get_output_recursive(tree.left, list_of_logits, is_root=False) @@ -197,12 +200,12 @@ def score(self, trees): labels = get_labels(t) all_labels.append(labels) - prediction = tf.argmax(logits, 1) + prediction = tf.argmax(input=logits, axis=1) predictions.append(prediction) n_correct = 0 n_total = 0 - with tf.Session() as session: + with tf.compat.v1.Session() as session: self.saver.restore(session, "recursive.ckpt") for prediction, y in zip(predictions, all_labels): p = session.run(prediction) diff --git a/nlp_class2/rntn_tensorflow_rnn.py b/nlp_class2/rntn_tensorflow_rnn.py index 29caba32..816ff4a2 100644 --- a/nlp_class2/rntn_tensorflow_rnn.py +++ b/nlp_class2/rntn_tensorflow_rnn.py @@ -17,6 +17,9 @@ from datetime import datetime from sklearn.metrics import f1_score +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + class RecursiveNN: @@ -54,10 +57,10 @@ def fit(self, trees, test_trees, reg=1e-3, epochs=8, train_inner_nodes=False): self.weights = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.Wo] - words = tf.placeholder(tf.int32, shape=(None,), name='words') - left_children = tf.placeholder(tf.int32, shape=(None,), name='left_children') - right_children = tf.placeholder(tf.int32, shape=(None,), name='right_children') - labels = tf.placeholder(tf.int32, shape=(None,), name='labels') + words = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='words') + left_children = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='left_children') + right_children = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='right_children') + labels = tf.compat.v1.placeholder(tf.int32, shape=(None,), name='labels') # save for later self.words = words @@ -89,9 +92,9 @@ def recurrence(hiddens, n): # any 
non-word will have index -1 h_n = tf.cond( - w >= 0, - lambda: tf.nn.embedding_lookup(self.We, w), - lambda: recursive_net_transform(hiddens, n) + pred=w >= 0, + true_fn=lambda: tf.nn.embedding_lookup(params=self.We, ids=w), + false_fn=lambda: recursive_net_transform(hiddens, n) ) hiddens = hiddens.write(n, h_n) n = tf.add(n, 1) @@ -100,7 +103,7 @@ def recurrence(hiddens, n): def condition(hiddens, n): # loop should continue while n < len(words) - return tf.less(n, tf.shape(words)[0]) + return tf.less(n, tf.shape(input=words)[0]) hiddens = tf.TensorArray( @@ -112,44 +115,44 @@ def condition(hiddens, n): ) hiddens, _ = tf.while_loop( - condition, - recurrence, - [hiddens, tf.constant(0)], + cond=condition, + body=recurrence, + loop_vars=[hiddens, tf.constant(0)], parallel_iterations=1 ) h = hiddens.stack() logits = tf.matmul(h, self.Wo) + self.bo - prediction_op = tf.argmax(logits, axis=1) + prediction_op = tf.argmax(input=logits, axis=1) self.prediction_op = prediction_op rcost = reg*sum(tf.nn.l2_loss(p) for p in self.weights) if train_inner_nodes: # filter out -1s - labeled_indices = tf.where(labels >= 0) + labeled_indices = tf.compat.v1.where(labels >= 0) cost_op = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=tf.gather(logits, labeled_indices), labels=tf.gather(labels, labeled_indices), ) ) + rcost else: cost_op = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits[-1], labels=labels[-1], ) ) + rcost - train_op = tf.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op) + train_op = tf.compat.v1.train.AdagradOptimizer(learning_rate=8e-3).minimize(cost_op) # train_op = tf.train.MomentumOptimizer(learning_rate=8e-3, momentum=0.9).minimize(cost_op) # NOTE: If you're using GPU, InteractiveSession breaks # AdagradOptimizer and some other optimizers # change to tf.Session() if so. 
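# [editor's sketch] The TensorArray + while_loop pattern used above, reduced to a
# toy example (running sum of a 1-D float32 vector). Illustration only -- the
# function name and shapes are invented and it is not part of this patch; it
# assumes eager execution is disabled, as at the top of this file.
import tensorflow as tf

def toy_running_sum(values):
    n = tf.shape(input=values)[0]

    def condition(ta, i, acc):
        return tf.less(i, n)

    def body(ta, i, acc):
        acc = acc + values[i]
        ta = ta.write(i, acc)
        return ta, tf.add(i, 1), acc

    ta = tf.TensorArray(tf.float32, size=n)
    ta, _, _ = tf.while_loop(
        cond=condition,
        body=body,
        loop_vars=[ta, tf.constant(0), tf.constant(0.0)],
        parallel_iterations=1
    )
    return ta.stack()  # same length as `values`, holding the partial sums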
- self.session = tf.Session() - init_op = tf.global_variables_initializer() + self.session = tf.compat.v1.Session() + init_op = tf.compat.v1.global_variables_initializer() self.session.run(init_op) diff --git a/nlp_class2/word2vec_tf.py b/nlp_class2/word2vec_tf.py index fee4ad99..d272b003 100644 --- a/nlp_class2/word2vec_tf.py +++ b/nlp_class2/word2vec_tf.py @@ -25,6 +25,9 @@ import sys import string +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + # unfortunately these work different ways @@ -131,36 +134,36 @@ def train_model(savedir): # create the model - tf_input = tf.placeholder(tf.int32, shape=(None,)) - tf_negword = tf.placeholder(tf.int32, shape=(None,)) - tf_context = tf.placeholder(tf.int32, shape=(None,)) # targets (context) + tf_input = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + tf_negword = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + tf_context = tf.compat.v1.placeholder(tf.int32, shape=(None,)) # targets (context) tfW = tf.Variable(W) tfV = tf.Variable(V.T) # biases = tf.Variable(np.zeros(vocab_size, dtype=np.float32)) def dot(A, B): C = A * B - return tf.reduce_sum(C, axis=1) + return tf.reduce_sum(input_tensor=C, axis=1) # correct middle word output - emb_input = tf.nn.embedding_lookup(tfW, tf_input) # 1 x D - emb_output = tf.nn.embedding_lookup(tfV, tf_context) # N x D + emb_input = tf.nn.embedding_lookup(params=tfW, ids=tf_input) # 1 x D + emb_output = tf.nn.embedding_lookup(params=tfV, ids=tf_context) # N x D correct_output = dot(emb_input, emb_output) # N # emb_input = tf.transpose(emb_input, (1, 0)) # correct_output = tf.matmul(emb_output, emb_input) pos_loss = tf.nn.sigmoid_cross_entropy_with_logits( - labels=tf.ones(tf.shape(correct_output)), logits=correct_output) + labels=tf.ones(tf.shape(input=correct_output)), logits=correct_output) # incorrect middle word output - emb_input = tf.nn.embedding_lookup(tfW, tf_negword) + emb_input = tf.nn.embedding_lookup(params=tfW, ids=tf_negword) incorrect_output = dot(emb_input, emb_output) # emb_input = tf.transpose(emb_input, (1, 0)) # incorrect_output = tf.matmul(emb_output, emb_input) neg_loss = tf.nn.sigmoid_cross_entropy_with_logits( - labels=tf.zeros(tf.shape(incorrect_output)), logits=incorrect_output) + labels=tf.zeros(tf.shape(input=incorrect_output)), logits=incorrect_output) # total loss - loss = tf.reduce_mean(pos_loss) + tf.reduce_mean(neg_loss) + loss = tf.reduce_mean(input_tensor=pos_loss) + tf.reduce_mean(input_tensor=neg_loss) # output = hidden.dot(tfV) @@ -179,12 +182,12 @@ def dot(A, B): # optimizer # train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) - train_op = tf.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss) + train_op = tf.compat.v1.train.MomentumOptimizer(0.1, momentum=0.9).minimize(loss) # train_op = tf.train.AdamOptimizer(1e-2).minimize(loss) # make session - session = tf.Session() - init_op = tf.global_variables_initializer() + session = tf.compat.v1.Session() + init_op = tf.compat.v1.global_variables_initializer() session.run(init_op) diff --git a/recommenders/rbm_tf_k.py b/recommenders/rbm_tf_k.py index 6043085e..836a5595 100644 --- a/recommenders/rbm_tf_k.py +++ b/recommenders/rbm_tf_k.py @@ -14,6 +14,9 @@ from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz from datetime import datetime +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + # is it possible to one-hot encode the data prior to feeding it # into the neural network, so that we don't have to do it on the fly? 
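# [editor's sketch] One way to do the pre-encoding asked about in the comment above:
# turn an integer rating matrix R (N x D, entries 1..K, 0 meaning "not rated") into
# the (N, D, K) one-hot array plus matching mask that this model feeds in. This is an
# illustration under those assumptions, not code from the patch -- the "faster"
# variant later in this commit one-hot encodes on the fly inside the graph instead.
import numpy as np

def one_hot_ratings(R, K):
    N, D = R.shape
    X = np.zeros((N, D, K), dtype=np.float32)
    n_idx, d_idx = np.nonzero(R)                    # positions that were rated
    X[n_idx, d_idx, R[n_idx, d_idx] - 1] = 1.0      # rating r maps to class r-1
    mask = np.repeat((R > 0)[:, :, None], K, axis=2).astype(np.float32)
    return X, mask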
@@ -84,13 +87,13 @@ def __init__(self, D, M, K): def build(self, D, M, K): # params - self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) + self.W = tf.Variable(tf.random.normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) self.c = tf.Variable(np.zeros(M).astype(np.float32)) self.b = tf.Variable(np.zeros((D, K)).astype(np.float32)) # data - self.X_in = tf.placeholder(tf.float32, shape=(None, D, K)) - self.mask = tf.placeholder(tf.float32, shape=(None, D, K)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D, K)) + self.mask = tf.compat.v1.placeholder(tf.float32, shape=(None, D, K)) # conditional probabilities # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2 @@ -99,21 +102,21 @@ def build(self, D, M, K): self.p_h_given_v = p_h_given_v # save for later # draw a sample from p(h | v) - r = tf.random_uniform(shape=tf.shape(p_h_given_v)) - H = tf.to_float(r < p_h_given_v) + r = tf.random.uniform(shape=tf.shape(input=p_h_given_v)) + H = tf.cast(r < p_h_given_v, dtype=tf.float32) # draw a sample from p(v | h) # note: we don't have to actually do the softmax logits = dot2(H, self.W) + self.b - cdist = tf.distributions.Categorical(logits=logits) + cdist = tf.compat.v1.distributions.Categorical(logits=logits) X_sample = cdist.sample() # shape is (N, D) X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K) X_sample = X_sample * self.mask # missing ratings shouldn't contribute to objective # build the objective - objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample)) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + objective = tf.reduce_mean(input_tensor=self.free_energy(self.X_in)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample)) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective) # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) # build the cost @@ -121,8 +124,8 @@ def build(self, D, M, K): # just to observe what happens during training logits = self.forward_logits(self.X_in) self.cost = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - labels=self.X_in, + input_tensor=tf.nn.softmax_cross_entropy_with_logits( + labels=tf.stop_gradient(self.X_in), logits=logits, ) ) @@ -130,8 +133,8 @@ def build(self, D, M, K): # to get the output self.output_visible = self.forward_output(self.X_in) - initop = tf.global_variables_initializer() - self.session = tf.Session() + initop = tf.compat.v1.global_variables_initializer() + self.session = tf.compat.v1.Session() self.session.run(initop) def fit(self, X, mask, X_test, mask_test, epochs=10, batch_sz=256, show_fig=True): @@ -202,10 +205,10 @@ def fit(self, X, mask, X_test, mask_test, epochs=10, batch_sz=256, show_fig=True plt.show() def free_energy(self, V): - first_term = -tf.reduce_sum(dot1(V, self.b)) + first_term = -tf.reduce_sum(input_tensor=dot1(V, self.b)) second_term = -tf.reduce_sum( # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), - tf.nn.softplus(dot1(V, self.W) + self.c), + input_tensor=tf.nn.softplus(dot1(V, self.W) + self.c), axis=1 ) return first_term + second_term diff --git a/recommenders/rbm_tf_k_faster.py b/recommenders/rbm_tf_k_faster.py index 75100ba0..9a1a242a 100644 --- a/recommenders/rbm_tf_k_faster.py +++ b/recommenders/rbm_tf_k_faster.py @@ -14,6 +14,9 @@ from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz from datetime import datetime +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + 
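# [editor's sketch] The migration pattern this commit applies everywhere, reduced to
# a self-contained toy: keep the TF1-style graph code, but disable eager execution
# and reach the TF1 APIs through tf.compat.v1. Illustration only; the variable names
# are invented and this is not part of the patch.
import tensorflow as tf
if tf.__version__.startswith('2'):
    tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
w = tf.Variable(tf.random.normal(shape=(3, 1)))
y = tf.reduce_sum(input_tensor=tf.matmul(x, w))

with tf.compat.v1.Session() as session:
    session.run(tf.compat.v1.global_variables_initializer())
    print(session.run(y, feed_dict={x: [[1., 2., 3.]]}))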
def dot1(V, W): # V is N x D x K (batch of visible units) @@ -38,12 +41,12 @@ def __init__(self, D, M, K): def build(self, D, M, K): # params - self.W = tf.Variable(tf.random_normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) + self.W = tf.Variable(tf.random.normal(shape=(D, K, M)) * np.sqrt(2.0 / M)) self.c = tf.Variable(np.zeros(M).astype(np.float32)) self.b = tf.Variable(np.zeros((D, K)).astype(np.float32)) # data - self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) # one hot encode X # first, make each rating an int @@ -57,13 +60,13 @@ def build(self, D, M, K): self.p_h_given_v = p_h_given_v # save for later # draw a sample from p(h | v) - r = tf.random_uniform(shape=tf.shape(p_h_given_v)) - H = tf.to_float(r < p_h_given_v) + r = tf.random.uniform(shape=tf.shape(input=p_h_given_v)) + H = tf.cast(r < p_h_given_v, dtype=tf.float32) # draw a sample from p(v | h) # note: we don't have to actually do the softmax logits = dot2(H, self.W) + self.b - cdist = tf.distributions.Categorical(logits=logits) + cdist = tf.compat.v1.distributions.Categorical(logits=logits) X_sample = cdist.sample() # shape is (N, D) X_sample = tf.one_hot(X_sample, depth=K) # turn it into (N, D, K) @@ -74,8 +77,8 @@ def build(self, D, M, K): # build the objective - objective = tf.reduce_mean(self.free_energy(X)) - tf.reduce_mean(self.free_energy(X_sample)) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + objective = tf.reduce_mean(input_tensor=self.free_energy(X)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample)) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective) # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) # build the cost @@ -83,8 +86,8 @@ def build(self, D, M, K): # just to observe what happens during training logits = self.forward_logits(X) self.cost = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits( - labels=X, + input_tensor=tf.nn.softmax_cross_entropy_with_logits( + labels=tf.stop_gradient(X), logits=logits, ) ) @@ -98,17 +101,17 @@ def build(self, D, M, K): self.pred = tf.tensordot(self.output_visible, self.one_to_ten, axes=[[2], [0]]) mask = tf.cast(self.X_in > 0, tf.float32) se = mask * (self.X_in - self.pred) * (self.X_in - self.pred) - self.sse = tf.reduce_sum(se) + self.sse = tf.reduce_sum(input_tensor=se) # test SSE - self.X_test = tf.placeholder(tf.float32, shape=(None, D)) + self.X_test = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) mask = tf.cast(self.X_test > 0, tf.float32) tse = mask * (self.X_test - self.pred) * (self.X_test - self.pred) - self.tsse = tf.reduce_sum(tse) + self.tsse = tf.reduce_sum(input_tensor=tse) - initop = tf.global_variables_initializer() - self.session = tf.Session() + initop = tf.compat.v1.global_variables_initializer() + self.session = tf.compat.v1.Session() self.session.run(initop) def fit(self, X, X_test, epochs=10, batch_sz=256, show_fig=True): @@ -168,10 +171,10 @@ def fit(self, X, X_test, epochs=10, batch_sz=256, show_fig=True): plt.show() def free_energy(self, V): - first_term = -tf.reduce_sum(dot1(V, self.b)) + first_term = -tf.reduce_sum(input_tensor=dot1(V, self.b)) second_term = -tf.reduce_sum( # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), - tf.nn.softplus(dot1(V, self.W) + self.c), + input_tensor=tf.nn.softplus(dot1(V, self.W) + self.c), axis=1 ) return first_term + second_term diff --git a/unsupervised_class2/autoencoder_tf.py b/unsupervised_class2/autoencoder_tf.py index 
08c3f156..93d2b87b 100644 --- a/unsupervised_class2/autoencoder_tf.py +++ b/unsupervised_class2/autoencoder_tf.py @@ -12,6 +12,9 @@ from sklearn.utils import shuffle from util import error_rate, getKaggleMNIST +if tf.__version__.startswith('2'): + tf.compat.v1.disable_eager_execution() + class AutoEncoder(object): def __init__(self, D, M, an_id): @@ -23,11 +26,11 @@ def set_session(self, session): self.session = session def build(self, D, M): - self.W = tf.Variable(tf.random_normal(shape=(D, M))) + self.W = tf.Variable(tf.random.normal(shape=(D, M))) self.bh = tf.Variable(np.zeros(M).astype(np.float32)) self.bo = tf.Variable(np.zeros(D).astype(np.float32)) - self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) self.Z = self.forward_hidden(self.X_in) # for transform() later self.X_hat = self.forward_output(self.X_in) @@ -36,13 +39,13 @@ def build(self, D, M): # will have numerical stability issues if X_hat = 0 or 1 logits = self.forward_logits(self.X_in) self.cost = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( + input_tensor=tf.nn.sigmoid_cross_entropy_with_logits( labels=self.X_in, logits=logits, ) ) - self.train_op = tf.train.AdamOptimizer(1e-1).minimize(self.cost) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-1).minimize(self.cost) # self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(self.cost) def fit(self, X, epochs=1, batch_sz=100, show_fig=False): @@ -82,7 +85,7 @@ def forward_hidden(self, X): def forward_logits(self, X): Z = self.forward_hidden(X) - return tf.matmul(Z, tf.transpose(self.W)) + self.bo + return tf.matmul(Z, tf.transpose(a=self.W)) + self.bo def forward_output(self, X): return tf.nn.sigmoid(self.forward_logits(X)) @@ -107,22 +110,22 @@ def set_session(self, session): def build_final_layer(self, D, M, K): # initialize logistic regression layer - self.W = tf.Variable(tf.random_normal(shape=(M, K))) + self.W = tf.Variable(tf.random.normal(shape=(M, K))) self.b = tf.Variable(np.zeros(K).astype(np.float32)) - self.X = tf.placeholder(tf.float32, shape=(None, D)) - labels = tf.placeholder(tf.int32, shape=(None,)) + self.X = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) + labels = tf.compat.v1.placeholder(tf.int32, shape=(None,)) self.Y = labels logits = self.forward(self.X) self.cost = tf.reduce_mean( - tf.nn.sparse_softmax_cross_entropy_with_logits( + input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels ) ) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost) - self.prediction = tf.argmax(logits, 1) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(self.cost) + self.prediction = tf.argmax(input=logits, axis=1) def fit(self, X, Y, Xtest, Ytest, pretrain=True, epochs=1, batch_sz=100): N = len(X) @@ -184,8 +187,8 @@ def test_pretraining_dnn(): _, D = Xtrain.shape K = len(set(Ytrain)) dnn = DNN(D, [1000, 750, 500], K) - init_op = tf.global_variables_initializer() - with tf.Session() as session: + init_op = tf.compat.v1.global_variables_initializer() + with tf.compat.v1.Session() as session: session.run(init_op) dnn.set_session(session) dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=10) @@ -198,8 +201,8 @@ def test_single_autoencoder(): _, D = Xtrain.shape autoencoder = AutoEncoder(D, 300, 0) - init_op = tf.global_variables_initializer() - with tf.Session() as session: + init_op = tf.compat.v1.global_variables_initializer() + with tf.compat.v1.Session() as session: 
session.run(init_op) autoencoder.set_session(session) autoencoder.fit(Xtrain, show_fig=True) diff --git a/unsupervised_class2/rbm_tf.py b/unsupervised_class2/rbm_tf.py index b39015b5..3b3516ce 100644 --- a/unsupervised_class2/rbm_tf.py +++ b/unsupervised_class2/rbm_tf.py @@ -25,13 +25,13 @@ def set_session(self, session): def build(self, D, M): # params - self.W = tf.Variable(tf.random_normal(shape=(D, M)) * np.sqrt(2.0 / M)) + self.W = tf.Variable(tf.random.normal(shape=(D, M)) * np.sqrt(2.0 / M)) # note: without limiting variance, you get numerical stability issues self.c = tf.Variable(np.zeros(M).astype(np.float32)) self.b = tf.Variable(np.zeros(D).astype(np.float32)) # data - self.X_in = tf.placeholder(tf.float32, shape=(None, D)) + self.X_in = tf.compat.v1.placeholder(tf.float32, shape=(None, D)) # conditional probabilities # NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2 @@ -42,21 +42,21 @@ def build(self, D, M): # probs=p_h_given_v, # dtype=tf.float32 # ) - r = tf.random_uniform(shape=tf.shape(p_h_given_v)) - H = tf.to_float(r < p_h_given_v) + r = tf.random.uniform(shape=tf.shape(input=p_h_given_v)) + H = tf.cast(r < p_h_given_v, dtype=tf.float32) - p_v_given_h = tf.nn.sigmoid(tf.matmul(H, tf.transpose(self.W)) + self.b) + p_v_given_h = tf.nn.sigmoid(tf.matmul(H, tf.transpose(a=self.W)) + self.b) # self.rng_v_given_h = tf.contrib.distributions.Bernoulli( # probs=p_v_given_h, # dtype=tf.float32 # ) - r = tf.random_uniform(shape=tf.shape(p_v_given_h)) - X_sample = tf.to_float(r < p_v_given_h) + r = tf.random.uniform(shape=tf.shape(input=p_v_given_h)) + X_sample = tf.cast(r < p_v_given_h, dtype=tf.float32) # build the objective - objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample)) - self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective) + objective = tf.reduce_mean(input_tensor=self.free_energy(self.X_in)) - tf.reduce_mean(input_tensor=self.free_energy(X_sample)) + self.train_op = tf.compat.v1.train.AdamOptimizer(1e-2).minimize(objective) # self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective) # build the cost @@ -64,7 +64,7 @@ def build(self, D, M): # just to observe what happens during training logits = self.forward_logits(self.X_in) self.cost = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( + input_tensor=tf.nn.sigmoid_cross_entropy_with_logits( labels=self.X_in, logits=logits, ) @@ -96,7 +96,7 @@ def free_energy(self, V): second_term = -tf.reduce_sum( # tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)), - tf.nn.softplus(tf.matmul(V, self.W) + self.c), + input_tensor=tf.nn.softplus(tf.matmul(V, self.W) + self.c), axis=1 ) @@ -107,7 +107,7 @@ def forward_hidden(self, X): def forward_logits(self, X): Z = self.forward_hidden(X) - return tf.matmul(Z, tf.transpose(self.W)) + self.b + return tf.matmul(Z, tf.transpose(a=self.W)) + self.b def forward_output(self, X): return tf.nn.sigmoid(self.forward_logits(X)) @@ -128,8 +128,8 @@ def main(): _, D = Xtrain.shape K = len(set(Ytrain)) dnn = DNN(D, [1000, 750, 500], K, UnsupervisedModel=RBM) - init_op = tf.global_variables_initializer() - with tf.Session() as session: + init_op = tf.compat.v1.global_variables_initializer() + with tf.compat.v1.Session() as session: session.run(init_op) dnn.set_session(session) dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=10) From b6a6b2f985ee066eb1be9aa895a04f837e912871 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 24 Aug 2020 22:48:05 -0400 Subject: [PATCH 206/329] test --- 
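[editor's note] On the free_energy() method in rbm_tf.py above: for a Bernoulli RBM
with visible bias b, hidden bias c and weights W, the standard free energy of a
visible vector v is F(v) = -b.v - sum_j softplus(c_j + (W^T v)_j), and the objective
built above is mean F(data) - mean F(Gibbs/negative sample). A minimal NumPy sketch
of that same formula (illustration only, names invented, not part of any patch):

import numpy as np

def bernoulli_rbm_free_energy(V, W, b, c):
    # V: (N, D) visible samples, W: (D, M), b: (D,), c: (M,)
    z = V.dot(W) + c                                            # (N, M) hidden pre-activations
    softplus = np.maximum(z, 0) + np.log1p(np.exp(-np.abs(z)))  # stable log(1 + e^z)
    return -V.dot(b) - softplus.sum(axis=1)                     # per-sample free energy, shape (N,)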
README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e503449a..b7d2a891 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ Direct Course Links PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Aug 2020) -*** note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc. +*** Note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc. https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP5 From 0168563fb83f8779430f54265ed4b96923c3538e Mon Sep 17 00:00:00 2001 From: User Date: Mon, 24 Aug 2020 22:52:21 -0400 Subject: [PATCH 207/329] test --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b7d2a891..c2ab6996 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ Direct Course Links PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Aug 2020) -*** Note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc. +*** Note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc.. https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP5 From f0070c8c6c766a210e4b7c55a2ff82c1f7953a35 Mon Sep 17 00:00:00 2001 From: User Date: Sun, 30 Aug 2020 04:53:04 -0400 Subject: [PATCH 208/329] update --- supervised_class2/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/supervised_class2/extra_reading.txt b/supervised_class2/extra_reading.txt index f826cb04..79711307 100644 --- a/supervised_class2/extra_reading.txt +++ b/supervised_class2/extra_reading.txt @@ -8,4 +8,7 @@ Explaining AdaBoost http://rob.schapire.net/papers/explaining-adaboost.pdf Improved Boosting Algorithms Using Confidence-rated Predictions -https://sci2s.ugr.es/keel/pdf/algorithm/articulo/1999-ML-Improved%20boosting%20algorithms%20using%20confidence-rated%20predictions%20(Schapire%20y%20Singer).pdf \ No newline at end of file +https://sci2s.ugr.es/keel/pdf/algorithm/articulo/1999-ML-Improved%20boosting%20algorithms%20using%20confidence-rated%20predictions%20(Schapire%20y%20Singer).pdf + +Why does the bootstrap work? 
+http://www.stat.cmu.edu/~larry/=sml/Boot.pdf \ No newline at end of file From 50a668583c1f6d8b475641675662322200275f65 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 31 Aug 2020 02:03:11 -0400 Subject: [PATCH 209/329] update --- unsupervised_class3/util.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) mode change 100644 => 100755 unsupervised_class3/util.py diff --git a/unsupervised_class3/util.py b/unsupervised_class3/util.py old mode 100644 new mode 100755 index c8194c80..935eb634 --- a/unsupervised_class3/util.py +++ b/unsupervised_class3/util.py @@ -11,7 +11,26 @@ import zipfile import numpy as np import pandas as pd -from scipy.misc import imread, imsave, imresize + +try: + # new version doesn't support + from scipy.misc import imread, imsave, imresize +except: + from PIL import Image + def imread(fn): + im = Image.open(fn) + return np.array(im) + + def imsave(fn, arr): + im = Image.fromarray(arr) + im.save(fn) + + def imresize(arr, sz): + im = Image.fromarray(arr) + im.resize(sz) + return np.array(im) + + from glob import glob from tqdm import tqdm from sklearn.utils import shuffle From 1e5c8698ecb52e0a7d01bdd4a2f2ad8a8b5743d9 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 8 Sep 2020 00:35:05 -0400 Subject: [PATCH 210/329] update --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index c2ab6996..212e538d 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,11 @@ PyTorch: Deep Learning and Artificial Intelligence (special discount link for fu https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP5 + +Financial Engineering and Artificial Intelligence in Python +https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP + + Tensorflow 2.0: Deep Learning and Artificial Intelligence (Main Course - special discount link) https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 From 2b783e426f008f4fa9dc0fcdda75dbbb7a5f77ec Mon Sep 17 00:00:00 2001 From: User Date: Thu, 17 Sep 2020 12:15:47 -0400 Subject: [PATCH 211/329] update --- ab_testing/bayesian_bandit.py | 36 ++++++----- ab_testing/bayesian_normal.py | 86 +++++++++++++++++++++++++++ ab_testing/bayesian_starter.py | 78 ++++++++++++++++++++++++ ab_testing/comparing_epsilons.py | 89 ++++++++++++++++++++++++++++ ab_testing/epsilon_greedy.py | 88 +++++++++++++++++++++++++++ ab_testing/epsilon_greedy_starter.py | 88 +++++++++++++++++++++++++++ ab_testing/optimistic.py | 71 ++++++++++++++++++++++ ab_testing/optimistic_starter.py | 71 ++++++++++++++++++++++ ab_testing/ucb1.py | 81 +++++++++++++++++++++++++ ab_testing/ucb1_starter.py | 81 +++++++++++++++++++++++++ 10 files changed, 753 insertions(+), 16 deletions(-) create mode 100644 ab_testing/bayesian_normal.py create mode 100644 ab_testing/bayesian_starter.py create mode 100755 ab_testing/comparing_epsilons.py create mode 100755 ab_testing/epsilon_greedy.py create mode 100755 ab_testing/epsilon_greedy_starter.py create mode 100644 ab_testing/optimistic.py create mode 100644 ab_testing/optimistic_starter.py create mode 100644 ab_testing/ucb1.py create mode 100644 ab_testing/ucb1_starter.py diff --git a/ab_testing/bayesian_bandit.py b/ab_testing/bayesian_bandit.py index a930cf2b..61e8f812 100644 --- a/ab_testing/bayesian_bandit.py +++ b/ab_testing/bayesian_bandit.py @@ -12,15 +12,17 @@ from scipy.stats import beta +# np.random.seed(2) NUM_TRIALS = 2000 BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] -class Bandit(object): +class Bandit: def __init__(self, p): self.p = p 
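        # [editor's note] a and b below are Beta posterior parameters: starting from a
        # Beta(1, 1) (uniform) prior, after s wins and f losses the posterior is
        # Beta(1 + s, 1 + f), which is exactly what update() maintains. A tiny
        # standalone illustration with invented numbers (not part of the patch):
        #     from scipy.stats import beta
        #     s, f = 12, 8
        #     post = beta(1 + s, 1 + f)
        #     post.mean()   # (1 + s) / (2 + s + f) = 13/22, about 0.59
        #     post.rvs()    # one Thompson sample for this arm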
self.a = 1 self.b = 1 + self.N = 0 # for information only def pull(self): return np.random.random() < self.p @@ -31,14 +33,15 @@ def sample(self): def update(self, x): self.a += x self.b += 1 - x + self.N += 1 def plot(bandits, trial): x = np.linspace(0, 1, 200) for b in bandits: y = beta.pdf(x, b.a, b.b) - plt.plot(x, y, label="real p: %.4f" % b.p) - plt.title("Bandit distributions after %s trials" % trial) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") plt.legend() plt.show() @@ -47,27 +50,28 @@ def experiment(): bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) - # take a sample from each bandit - bestb = None - maxsample = -1 - allsamples = [] # let's collect these just to print for debugging - for b in bandits: - sample = b.sample() - allsamples.append("%.4f" % sample) - if sample > maxsample: - maxsample = sample - bestb = b + # plot the posteriors if i in sample_points: - print("current samples: %s" % allsamples) plot(bandits, i) # pull the arm for the bandit with the largest sample - x = bestb.pull() + x = bandits[j].pull() + + # update rewards + rewards[i] = x # update the distribution for the bandit whose arm we just pulled - bestb.update(x) + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) if __name__ == "__main__": diff --git a/ab_testing/bayesian_normal.py b/ab_testing/bayesian_normal.py new file mode 100644 index 00000000..4305e0f7 --- /dev/null +++ b/ab_testing/bayesian_normal.py @@ -0,0 +1,86 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm + + +np.random.seed(1) +NUM_TRIALS = 2000 +BANDIT_MEANS = [1, 2, 3] + + +class Bandit: + def __init__(self, true_mean): + self.true_mean = true_mean + # parameters for mu - prior is N(0,1) + self.m = 0 + self.lambda_ = 1 + self.sum_x = 0 # for convenience + self.tau = 1 + self.N = 0 + + def pull(self): + return np.random.randn() / np.sqrt(self.tau) + self.true_mean + + def sample(self): + return np.random.randn() / np.sqrt(self.lambda_) + self.m + + def update(self, x): + self.lambda_ += self.tau + self.sum_x += x + self.m = self.tau*self.sum_x / self.lambda_ + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(-3, 6, 200) + for b in bandits: + y = norm.pdf(x, b.m, np.sqrt(1. 
/ b.lambda_)) + plt.plot(x, y, label=f"real mean: {b.true_mean:.4f}, num plays: {b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def run_experiment(): + bandits = [Bandit(m) for m in BANDIT_MEANS] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.empty(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = np.argmax([b.sample() for b in bandits]) + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # update rewards + rewards[i] = x + + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + for m in BANDIT_MEANS: + plt.plot(np.ones(NUM_TRIALS)*m) + plt.show() + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + + diff --git a/ab_testing/bayesian_starter.py b/ab_testing/bayesian_starter.py new file mode 100644 index 00000000..68e12f75 --- /dev/null +++ b/ab_testing/bayesian_starter.py @@ -0,0 +1,78 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import beta + + +# np.random.seed(2) +NUM_TRIALS = 2000 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + self.p = p + self.a = # TODO + self.b = # TODO + self.N = 0 # for information only + + def pull(self): + return np.random.random() < self.p + + def sample(self): + return # TODO - draw a sample from Beta(a, b) + + def update(self, x): + self.a = # TODO + self.b = # TODO + self.N += 1 + + +def plot(bandits, trial): + x = np.linspace(0, 1, 200) + for b in bandits: + y = beta.pdf(x, b.a, b.b) + plt.plot(x, y, label=f"real p: {b.p:.4f}, win rate = {b.a - 1}/{b.N}") + plt.title(f"Bandit distributions after {trial} trials") + plt.legend() + plt.show() + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + sample_points = [5,10,20,50,100,200,500,1000,1500,1999] + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # Thompson sampling + j = # TODO + + # plot the posteriors + if i in sample_points: + plot(bandits, i) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/comparing_epsilons.py b/ab_testing/comparing_epsilons.py new file mode 100755 index 00000000..8fe885c9 --- /dev/null +++ b/ab_testing/comparing_epsilons.py @@ -0,0 +1,89 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from 
builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +class BanditArm: + def __init__(self, m): + self.m = m + self.m_estimate = 0 + self.N = 0 + + def pull(self): + return np.random.randn() + self.m + + def update(self, x): + self.N += 1 + self.m_estimate = (1 - 1.0/self.N)*self.m_estimate + 1.0/self.N*x + + +def run_experiment(m1, m2, m3, eps, N): + bandits = [BanditArm(m1), BanditArm(m2), BanditArm(m3)] + + # count number of suboptimal choices + means = np.array([m1, m2, m3]) + true_best = np.argmax(means) + count_suboptimal = 0 + + data = np.empty(N) + + for i in range(N): + # epsilon greedy + p = np.random.random() + if p < eps: + j = np.random.choice(len(bandits)) + else: + j = np.argmax([b.m_estimate for b in bandits]) + x = bandits[j].pull() + bandits[j].update(x) + + if j != true_best: + count_suboptimal += 1 + + # for the plot + data[i] = x + cumulative_average = np.cumsum(data) / (np.arange(N) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(N)*m1) + plt.plot(np.ones(N)*m2) + plt.plot(np.ones(N)*m3) + plt.xscale('log') + plt.show() + + for b in bandits: + print(b.m_estimate) + + print("percent suboptimal for epsilon = %s:" % eps, float(count_suboptimal) / N) + + return cumulative_average + +if __name__ == '__main__': + m1, m2, m3 = 1.5, 2.5, 3.5 + c_1 = run_experiment(m1, m2, m3, 0.1, 100000) + c_05 = run_experiment(m1, m2, m3, 0.05, 100000) + c_01 = run_experiment(m1, m2, m3, 0.01, 100000) + + # log scale plot + plt.plot(c_1, label='eps = 0.1') + plt.plot(c_05, label='eps = 0.05') + plt.plot(c_01, label='eps = 0.01') + plt.legend() + plt.xscale('log') + plt.show() + + + # linear plot + plt.plot(c_1, label='eps = 0.1') + plt.plot(c_05, label='eps = 0.05') + plt.plot(c_01, label='eps = 0.01') + plt.legend() + plt.show() + diff --git a/ab_testing/epsilon_greedy.py b/ab_testing/epsilon_greedy.py new file mode 100755 index 00000000..512c70c2 --- /dev/null +++ b/ab_testing/epsilon_greedy.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class BanditArm: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
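        # [editor's note] the next line is the incremental sample mean: it is
        # algebraically identical to p_estimate += (x - p_estimate) / N, so the
        # estimate tracks the arm's true win rate without storing past rewards.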
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [BanditArm(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = np.random.randint(len(bandits)) + else: + num_times_exploited += 1 + j = np.argmax([b.p_estimate for b in bandits]) + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/epsilon_greedy_starter.py b/ab_testing/epsilon_greedy_starter.py new file mode 100755 index 00000000..4b2a77d1 --- /dev/null +++ b/ab_testing/epsilon_greedy_starter.py @@ -0,0 +1,88 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class BanditArm: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N = # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [BanditArm(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + num_times_explored = 0 + num_times_exploited = 0 + num_optimal = 0 + optimal_j = np.argmax([b.p for b in bandits]) + print("optimal j:", optimal_j) + + for i in range(NUM_TRIALS): + + # use epsilon-greedy to select the next bandit + if np.random.random() < EPS: + num_times_explored += 1 + j = # TODO + else: + num_times_exploited += 1 + j = # TODO + + if j == optimal_j: + num_optimal += 1 + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num_times_explored:", num_times_explored) + 
print("num_times_exploited:", num_times_exploited) + print("num times selected optimal bandit:", num_optimal) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/optimistic.py b/ab_testing/optimistic.py new file mode 100644 index 00000000..1d024fef --- /dev/null +++ b/ab_testing/optimistic.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 5. + self.N = 1. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. + self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = np.argmax([b.p_estimate for b in bandits]) + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/optimistic_starter.py b/ab_testing/optimistic_starter.py new file mode 100644 index 00000000..56b4e5c9 --- /dev/null +++ b/ab_testing/optimistic_starter.py @@ -0,0 +1,71 @@ +# From the course: Bayesin Machine Learning in Python: A/B Testing +# https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing +# https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import matplotlib.pyplot as plt +import numpy as np + + +NUM_TRIALS = 10000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = # TODO + self.N = # TODO + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + # TODO + self.p_estimate = # TODO + + +def experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + + rewards = 
np.zeros(NUM_TRIALS) + for i in range(NUM_TRIALS): + # use optimistic initial values to select the next bandit + j = # TODO + + # pull the arm for the bandit with the largest sample + x = bandits[j].pull() + + # update rewards log + rewards[i] = x + + # update the distribution for the bandit whose arm we just pulled + bandits[j].update(x) + + + # print mean estimates for each bandit + for b in bandits: + print("mean estimate:", b.p_estimate) + + # print total reward + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + # plot the results + cumulative_rewards = np.cumsum(rewards) + win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1) + plt.ylim([0, 1]) + plt.plot(win_rates) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + +if __name__ == "__main__": + experiment() diff --git a/ab_testing/ucb1.py b/ab_testing/ucb1.py new file mode 100644 index 00000000..5779b654 --- /dev/null +++ b/ab_testing/ucb1.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +# https://books.google.ca/books?id=_ATpBwAAQBAJ&lpg=PA201&ots=rinZM8jQ6s&dq=hoeffding%20bound%20gives%20probability%20%22greater%20than%201%22&pg=PA201#v=onepage&q&f=false +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
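+        # N and the running mean are the only per-arm statistics UCB1 needs;
+        # they feed directly into the upper confidence bound computed by ucb() below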
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def ucb(mean, n, nj): + return mean + np.sqrt(2*np.log(n) / nj) + + +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 + + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + for i in range(NUM_TRIALS): + j = np.argmax([ucb(b.p_estimate, total_plays, b.N) for b in bandits]) + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + # for the plot + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.xscale('log') + plt.show() + + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + + for b in bandits: + print(b.p_estimate) + + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + diff --git a/ab_testing/ucb1_starter.py b/ab_testing/ucb1_starter.py new file mode 100644 index 00000000..9e9c3106 --- /dev/null +++ b/ab_testing/ucb1_starter.py @@ -0,0 +1,81 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +# https://books.google.ca/books?id=_ATpBwAAQBAJ&lpg=PA201&ots=rinZM8jQ6s&dq=hoeffding%20bound%20gives%20probability%20%22greater%20than%201%22&pg=PA201#v=onepage&q&f=false +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import numpy as np +import matplotlib.pyplot as plt + + +NUM_TRIALS = 100000 +EPS = 0.1 +BANDIT_PROBABILITIES = [0.2, 0.5, 0.75] + + +class Bandit: + def __init__(self, p): + # p: the win rate + self.p = p + self.p_estimate = 0. + self.N = 0. # num samples collected so far + + def pull(self): + # draw a 1 with probability p + return np.random.random() < self.p + + def update(self, x): + self.N += 1. 
+ self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N + + +def ucb(mean, n, nj): + return # TODO + + +def run_experiment(): + bandits = [Bandit(p) for p in BANDIT_PROBABILITIES] + rewards = np.empty(NUM_TRIALS) + total_plays = 0 + + # initialization: play each bandit once + for j in range(len(bandits)): + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + for i in range(NUM_TRIALS): + j = # TODO + x = bandits[j].pull() + total_plays += 1 + bandits[j].update(x) + + # for the plot + rewards[i] = x + cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1) + + # plot moving average ctr + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.xscale('log') + plt.show() + + # plot moving average ctr linear + plt.plot(cumulative_average) + plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES)) + plt.show() + + for b in bandits: + print(b.p_estimate) + + print("total reward earned:", rewards.sum()) + print("overall win rate:", rewards.sum() / NUM_TRIALS) + print("num times selected each bandit:", [b.N for b in bandits]) + + return cumulative_average + +if __name__ == '__main__': + run_experiment() + From ce468b7348dd2774494b727d05ab176741d42ed0 Mon Sep 17 00:00:00 2001 From: User Date: Sun, 11 Oct 2020 00:16:00 -0400 Subject: [PATCH 212/329] update --- pytorch/plot_rl_rewards.py | 8 +++++++- rl/plot_rl_rewards.py | 8 +++++++- tf2.0/plot_rl_rewards.py | 8 +++++++- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/pytorch/plot_rl_rewards.py b/pytorch/plot_rl_rewards.py index 85cc1b2e..3eb8e171 100644 --- a/pytorch/plot_rl_rewards.py +++ b/pytorch/plot_rl_rewards.py @@ -11,6 +11,12 @@ print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") -plt.hist(a, bins=20) +if args.mode == 'train': + # show the training progress + plt.plot(a) +else: + # test - show a histogram of rewards + plt.hist(a, bins=20) + plt.title(args.mode) plt.show() \ No newline at end of file diff --git a/rl/plot_rl_rewards.py b/rl/plot_rl_rewards.py index e239b501..ba182c9f 100644 --- a/rl/plot_rl_rewards.py +++ b/rl/plot_rl_rewards.py @@ -11,6 +11,12 @@ print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") -plt.hist(a, bins=20) +if args.mode == 'train': + # show the training progress + plt.plot(a) +else: + # test - show a histogram of rewards + plt.hist(a, bins=20) + plt.title(args.mode) plt.show() \ No newline at end of file diff --git a/tf2.0/plot_rl_rewards.py b/tf2.0/plot_rl_rewards.py index 85cc1b2e..3eb8e171 100644 --- a/tf2.0/plot_rl_rewards.py +++ b/tf2.0/plot_rl_rewards.py @@ -11,6 +11,12 @@ print(f"average reward: {a.mean():.2f}, min: {a.min():.2f}, max: {a.max():.2f}") -plt.hist(a, bins=20) +if args.mode == 'train': + # show the training progress + plt.plot(a) +else: + # test - show a histogram of rewards + plt.hist(a, bins=20) + plt.title(args.mode) plt.show() \ No newline at end of file From e3f167319325d9ae2d107507c92a0c497f5f470a Mon Sep 17 00:00:00 2001 From: User Date: Thu, 26 Nov 2020 13:27:39 -0500 Subject: [PATCH 213/329] update --- ab_testing/cdfs_and_percentiles.py | 35 ++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 ab_testing/cdfs_and_percentiles.py diff --git a/ab_testing/cdfs_and_percentiles.py b/ab_testing/cdfs_and_percentiles.py new file mode 100644 index 00000000..124bc408 --- /dev/null +++ b/ab_testing/cdfs_and_percentiles.py @@ -0,0 +1,35 @@ +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import 
norm + + +mu = 170 +sd = 7 + + +# generate samples from our distribution +x = norm.rvs(loc=mu, scale=sd, size=100) + +# maximum likelihood mean +x.mean() + +# maximum likelihood variance +x.var() + +# maximum likelihood std +x.std() + +# unbiased variance +x.var(ddof=1) + +# unbiased std +x.std(ddof=1) + +# at what height are you in the 95th percentile? +norm.ppf(0.95, loc=mu, scale=sd) + +# you are 160 cm tall, what percentile are you in? +norm.cdf(160, loc=mu, scale=sd) + +# you are 180 cm tall, what is the probability that someone is taller than you? +1 - norm.cdf(180, loc=mu, scale=sd) \ No newline at end of file From e12db82cc45bc6f844beb3356dc61278bb7f6852 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 28 Nov 2020 16:21:15 -0500 Subject: [PATCH 214/329] update --- rl2/cartpole/dqn_tf.py | 8 +++++++- rl2/cartpole/dqn_theano.py | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/rl2/cartpole/dqn_tf.py b/rl2/cartpole/dqn_tf.py index b6c812b1..e397acd6 100644 --- a/rl2/cartpole/dqn_tf.py +++ b/rl2/cartpole/dqn_tf.py @@ -16,6 +16,10 @@ from q_learning_bins import plot_running_avg +# global counter +global_iters = 0 + + # a version of HiddenLayer that keeps track of params class HiddenLayer: def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True): @@ -154,6 +158,7 @@ def sample_action(self, x, eps): def play_one(env, model, tmodel, eps, gamma, copy_period): + global global_iters observation = env.reset() done = False totalreward = 0 @@ -174,8 +179,9 @@ def play_one(env, model, tmodel, eps, gamma, copy_period): model.train(tmodel) iters += 1 + global_iters += 1 - if iters % copy_period == 0: + if global_iters % copy_period == 0: tmodel.copy_from(model) return totalreward diff --git a/rl2/cartpole/dqn_theano.py b/rl2/cartpole/dqn_theano.py index ebf7c36e..08dd2ded 100644 --- a/rl2/cartpole/dqn_theano.py +++ b/rl2/cartpole/dqn_theano.py @@ -17,6 +17,10 @@ from q_learning_bins import plot_running_avg +# global counter +global_iters = 0 + + # helper for adam optimizer # use tensorflow defaults def adam(cost, params, lr0=1e-2, beta1=0.9, beta2=0.999, eps=1e-8): @@ -170,6 +174,7 @@ def sample_action(self, x, eps): def play_one(env, model, tmodel, eps, gamma, copy_period): + global global_iters observation = env.reset() done = False totalreward = 0 @@ -190,8 +195,9 @@ def play_one(env, model, tmodel, eps, gamma, copy_period): model.train(tmodel) iters += 1 + global_iters += 1 - if iters % copy_period == 0: + if global_iters % copy_period == 0: tmodel.copy_from(model) return totalreward From 213e786540ea975cb039cae035891f5f6deddb56 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 22 Dec 2020 15:30:34 -0500 Subject: [PATCH 215/329] update --- ann_class/xor_donut.py | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/ann_class/xor_donut.py b/ann_class/xor_donut.py index 236883d3..48331337 100644 --- a/ann_class/xor_donut.py +++ b/ann_class/xor_donut.py @@ -82,10 +82,17 @@ def test_xor(): er = np.mean(prediction != Y) LL.append(ll) - W2 += learning_rate * (derivative_w2(Z, Y, pY) - regularization * W2) - b2 += learning_rate * (derivative_b2(Y, pY) - regularization * b2) - W1 += learning_rate * (derivative_w1(X, Z, Y, pY, W2) - regularization * W1) - b1 += learning_rate * (derivative_b1(Z, Y, pY, W2) - regularization * b1) + + # get gradients + gW2 = derivative_w2(Z, Y, pY) + gb2 = derivative_b2(Y, pY) + gW1 = derivative_w1(X, Z, Y, pY, W2) + gb1 = derivative_b1(Z, Y, pY, W2) + + W2 += learning_rate * (gW2 - regularization * W2) + 
b2 += learning_rate * (gb2 - regularization * b2) + W1 += learning_rate * (gW1 - regularization * W1) + b1 += learning_rate * (gb1 - regularization * b1) if i % 1000 == 0: print(ll) @@ -128,10 +135,17 @@ def test_donut(): prediction = predict(X, W1, b1, W2, b2) er = np.abs(prediction - Y).mean() LL.append(ll) - W2 += learning_rate * (derivative_w2(Z, Y, pY) - regularization * W2) - b2 += learning_rate * (derivative_b2(Y, pY) - regularization * b2) - W1 += learning_rate * (derivative_w1(X, Z, Y, pY, W2) - regularization * W1) - b1 += learning_rate * (derivative_b1(Z, Y, pY, W2) - regularization * b1) + + # get gradients + gW2 = derivative_w2(Z, Y, pY) + gb2 = derivative_b2(Y, pY) + gW1 = derivative_w1(X, Z, Y, pY, W2) + gb1 = derivative_b1(Z, Y, pY, W2) + + W2 += learning_rate * (gW2 - regularization * W2) + b2 += learning_rate * (gb2 - regularization * b2) + W1 += learning_rate * (gW1 - regularization * W1) + b1 += learning_rate * (gb1 - regularization * b1) if i % 300 == 0: print("i:", i, "ll:", ll, "classification rate:", 1 - er) plt.plot(LL) @@ -139,8 +153,8 @@ def test_donut(): if __name__ == '__main__': - # test_xor() - test_donut() + test_xor() + # test_donut() From c13853d7605babed3aeab0e238e639ea8162a765 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 22 Dec 2020 15:31:57 -0500 Subject: [PATCH 216/329] update --- ann_class/backprop.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ann_class/backprop.py b/ann_class/backprop.py index 9ce9f85d..62ce9e73 100644 --- a/ann_class/backprop.py +++ b/ann_class/backprop.py @@ -144,10 +144,16 @@ def main(): # this is gradient ASCENT, not DESCENT # be comfortable with both! # oldW2 = W2.copy() - W2 += learning_rate * derivative_w2(hidden, T, output) - b2 += learning_rate * derivative_b2(T, output) - W1 += learning_rate * derivative_w1(X, hidden, T, output, W2) - b1 += learning_rate * derivative_b1(T, output, W2, hidden) + + gW2 = derivative_w2(hidden, T, output) + gb2 = derivative_b2(T, output) + gW1 = derivative_w1(X, hidden, T, output, W2) + gb1 = derivative_b1(T, output, W2, hidden) + + W2 += learning_rate * gW2 + b2 += learning_rate * gb2 + W1 += learning_rate * gW1 + b1 += learning_rate * gb1 plt.plot(costs) plt.show() From 21a97ddc273bff51bab91192c4feb6e8729eb187 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 11 Jan 2021 23:22:39 -0500 Subject: [PATCH 217/329] update --- nlp_class2/bow_classifier.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nlp_class2/bow_classifier.py b/nlp_class2/bow_classifier.py index 60c1a92d..25588e3b 100644 --- a/nlp_class2/bow_classifier.py +++ b/nlp_class2/bow_classifier.py @@ -17,6 +17,7 @@ # data from https://www.cs.umb.edu/~smimarog/textmining/datasets/ +# alternate source: https://lazyprogrammer.me/course_files/deepnlp_classification_data.zip train = pd.read_csv('../large_files/r8-train-all-terms.txt', header=None, sep='\t') test = pd.read_csv('../large_files/r8-test-all-terms.txt', header=None, sep='\t') train.columns = ['label', 'content'] From 61591e6e720706d83c6a79e5bcba78ce352307be Mon Sep 17 00:00:00 2001 From: User Date: Wed, 27 Jan 2021 23:16:04 -0500 Subject: [PATCH 218/329] update --- ann_class2/sgd.py | 153 ++++++++++++++++++++++++++++------------------ 1 file changed, 95 insertions(+), 58 deletions(-) diff --git a/ann_class2/sgd.py b/ann_class2/sgd.py index 3c338b6d..9c6da842 100644 --- a/ann_class2/sgd.py +++ b/ann_class2/sgd.py @@ -1,15 +1,9 @@ # In this file we compare the progression of the cost function vs. 
iteration # for 3 cases: # 1) full gradient descent -# 2) batch gradient descent +# 2) mini-batch gradient descent # 3) stochastic gradient descent # -# We use the PCA-transformed data to keep the dimensionality down (D=300) -# I've tailored this example so that the training time for each is feasible. -# So what we are really comparing is how quickly each type of GD can converge, -# (but not actually waiting for convergence) and what the cost looks like at -# each iteration. -# # For the class Data Science: Practical Deep Learning Concepts in Theano and TensorFlow # https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow # https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow @@ -24,11 +18,11 @@ from sklearn.utils import shuffle from datetime import datetime -from util import get_transformed_data, forward, error_rate, cost, gradW, gradb, y2indicator +from util import get_normalized_data, forward, error_rate, cost, gradW, gradb, y2indicator def main(): - Xtrain, Xtest, Ytrain, Ytest = get_transformed_data() + Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() print("Performing logistic regression...") N, D = Xtrain.shape @@ -38,72 +32,101 @@ def main(): # 1. full W = np.random.randn(D, 10) / np.sqrt(D) b = np.zeros(10) - LL = [] - lr = 0.0001 - reg = 0.01 + test_losses_full = [] + lr = 0.9 + # lr0 = lr # save for later + reg = 0. t0 = datetime.now() + last_dt = 0 + intervals = [] for i in range(50): p_y = forward(Xtrain, W, b) - W += lr*(gradW(Ytrain_ind, p_y, Xtrain) - reg*W) - b += lr*(gradb(Ytrain_ind, p_y) - reg*b) - + gW = gradW(Ytrain_ind, p_y, Xtrain) / N + gb = gradb(Ytrain_ind, p_y) / N + + W += lr*(gW - reg*W) + b += lr*(gb - reg*b) p_y_test = forward(Xtest, W, b) - ll = cost(p_y_test, Ytest_ind) - LL.append(ll) - if i % 1 == 0: - err = error_rate(p_y_test, Ytest) - if i % 10 == 0: - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + test_loss = cost(p_y_test, Ytest_ind) + dt = (datetime.now() - t0).total_seconds() + + # save these + dt2 = dt - last_dt + last_dt = dt + intervals.append(dt2) + + test_losses_full.append([dt, test_loss]) + if (i + 1) % 10 == 0: + print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) print("Elapsted time for full GD:", datetime.now() - t0) + # save the max time so we don't surpass it in subsequent iterations + max_dt = dt + avg_interval_dt = np.mean(intervals) + # 2. stochastic W = np.random.randn(D, 10) / np.sqrt(D) b = np.zeros(10) - LL_stochastic = [] - lr = 0.0001 - reg = 0.01 + test_losses_sgd = [] + lr = 0.001 + reg = 0. t0 = datetime.now() + last_dt_calculated_loss = 0 + done = False for i in range(50): # takes very long since we're computing cost for 41k samples tmpX, tmpY = shuffle(Xtrain, Ytrain_ind) - for n in range(min(N, 500)): # shortcut so it won't take so long... 
+ for n in range(N): x = tmpX[n,:].reshape(1,D) y = tmpY[n,:].reshape(1,10) p_y = forward(x, W, b) - W += lr*(gradW(y, p_y, x) - reg*W) - b += lr*(gradb(y, p_y) - reg*b) + gW = gradW(y, p_y, x) + gb = gradb(y, p_y) + + W += lr*(gW - reg*W) + b += lr*(gb - reg*b) + + dt = (datetime.now() - t0).total_seconds() + dt2 = dt - last_dt_calculated_loss - p_y_test = forward(Xtest, W, b) - ll = cost(p_y_test, Ytest_ind) - LL_stochastic.append(ll) + if dt2 > avg_interval_dt: + p_y_test = forward(Xtest, W, b) + test_loss = cost(p_y_test, Ytest_ind) + test_losses_sgd.append([dt, test_loss]) - if i % 1 == 0: - err = error_rate(p_y_test, Ytest) - if i % 10 == 0: - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + # time to quit + if dt > max_dt: + done = True + break + if done: + break + + if (i + 1) % 10 == 0: + print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) print("Elapsted time for SGD:", datetime.now() - t0) - # 3. batch + # 3. mini-batch W = np.random.randn(D, 10) / np.sqrt(D) b = np.zeros(10) - LL_batch = [] - lr = 0.0001 - reg = 0.01 + test_losses_batch = [] batch_sz = 500 + lr = 0.08 + reg = 0. n_batches = N // batch_sz + t0 = datetime.now() + last_dt_calculated_loss = 0 + done = False for i in range(50): tmpX, tmpY = shuffle(Xtrain, Ytrain_ind) for j in range(n_batches): @@ -111,29 +134,43 @@ def main(): y = tmpY[j*batch_sz:(j*batch_sz + batch_sz),:] p_y = forward(x, W, b) - W += lr*(gradW(y, p_y, x) - reg*W) - b += lr*(gradb(y, p_y) - reg*b) - - p_y_test = forward(Xtest, W, b) - ll = cost(p_y_test, Ytest_ind) - LL_batch.append(ll) - if i % 1 == 0: - err = error_rate(p_y_test, Ytest) - if i % 10 == 0: - print("Cost at iteration %d: %.6f" % (i, ll)) - print("Error rate:", err) + gW = gradW(y, p_y, x) / batch_sz + gb = gradb(y, p_y) / batch_sz + + W += lr*(gW - reg*W) + b += lr*(gb - reg*b) + + dt = (datetime.now() - t0).total_seconds() + dt2 = dt - last_dt_calculated_loss + + if dt2 > avg_interval_dt: + p_y_test = forward(Xtest, W, b) + test_loss = cost(p_y_test, Ytest_ind) + test_losses_batch.append([dt, test_loss]) + + # time to quit + if dt > max_dt: + done = True + break + if done: + break + + if (i + 1) % 10 == 0: + print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) - print("Elapsted time for batch GD:", datetime.now() - t0) + print("Elapsted time for mini-batch GD:", datetime.now() - t0) + # convert to numpy arrays + test_losses_full = np.array(test_losses_full) + test_losses_sgd = np.array(test_losses_sgd) + test_losses_batch = np.array(test_losses_batch) - x1 = np.linspace(0, 1, len(LL)) - plt.plot(x1, LL, label="full") - x2 = np.linspace(0, 1, len(LL_stochastic)) - plt.plot(x2, LL_stochastic, label="stochastic") - x3 = np.linspace(0, 1, len(LL_batch)) - plt.plot(x3, LL_batch, label="batch") + + plt.plot(test_losses_full[:,0], test_losses_full[:,1], label="full") + plt.plot(test_losses_sgd[:,0], test_losses_sgd[:,1], label="sgd") + plt.plot(test_losses_batch[:,0], test_losses_batch[:,1], label="mini-batch") plt.legend() plt.show() From 24e315730d9926974bcf1d032109d72bbc0dab8f Mon Sep 17 00:00:00 2001 From: User Date: Fri, 29 Jan 2021 02:19:19 -0500 Subject: [PATCH 219/329] update --- ann_class2/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ann_class2/extra_reading.txt b/ann_class2/extra_reading.txt index f1825dd0..096d0a7c 100644 --- a/ann_class2/extra_reading.txt +++ 
b/ann_class2/extra_reading.txt @@ -7,6 +7,9 @@ https://arxiv.org/abs/1609.08326 Asynchronous Stochastic Gradient Descent with Variance Reduction for Non-Convex Optimization https://arxiv.org/abs/1604.03584 +Adam: A Method for Stochastic Optimization +https://arxiv.org/abs/1412.6980 + Large Scale Distributed Deep Networks https://static.googleusercontent.com/media/research.google.com/en//archive/large_deep_networks_nips2012.pdf From cdc650c8062a2db094279693acbf20975e1182fb Mon Sep 17 00:00:00 2001 From: User Date: Sat, 30 Jan 2021 18:52:54 -0500 Subject: [PATCH 220/329] update --- ann_class2/sgd.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/ann_class2/sgd.py b/ann_class2/sgd.py index 9c6da842..91ab78b1 100644 --- a/ann_class2/sgd.py +++ b/ann_class2/sgd.py @@ -31,10 +31,10 @@ def main(): # 1. full W = np.random.randn(D, 10) / np.sqrt(D) + W0 = W.copy() # save for later b = np.zeros(10) test_losses_full = [] lr = 0.9 - # lr0 = lr # save for later reg = 0. t0 = datetime.now() last_dt = 0 @@ -70,7 +70,7 @@ def main(): # 2. stochastic - W = np.random.randn(D, 10) / np.sqrt(D) + W = W0.copy() b = np.zeros(10) test_losses_sgd = [] lr = 0.001 @@ -96,6 +96,7 @@ def main(): dt2 = dt - last_dt_calculated_loss if dt2 > avg_interval_dt: + last_dt_calculated_loss = dt p_y_test = forward(Xtest, W, b) test_loss = cost(p_y_test, Ytest_ind) test_losses_sgd.append([dt, test_loss]) @@ -107,7 +108,7 @@ def main(): if done: break - if (i + 1) % 10 == 0: + if (i + 1) % 1 == 0: print("Cost at iteration %d: %.6f" % (i + 1, test_loss)) p_y = forward(Xtest, W, b) print("Final error rate:", error_rate(p_y, Ytest)) @@ -115,13 +116,13 @@ def main(): # 3. mini-batch - W = np.random.randn(D, 10) / np.sqrt(D) + W = W0.copy() b = np.zeros(10) test_losses_batch = [] batch_sz = 500 lr = 0.08 reg = 0. 
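+    # use ceil rather than floor division so the final partial batch
+    # (the leftover N % batch_sz samples) is still used each epoch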
- n_batches = N // batch_sz + n_batches = int(np.ceil(N / batch_sz)) t0 = datetime.now() @@ -130,12 +131,13 @@ def main(): for i in range(50): tmpX, tmpY = shuffle(Xtrain, Ytrain_ind) for j in range(n_batches): - x = tmpX[j*batch_sz:(j*batch_sz + batch_sz),:] - y = tmpY[j*batch_sz:(j*batch_sz + batch_sz),:] + x = tmpX[j*batch_sz:(j + 1)*batch_sz,:] + y = tmpY[j*batch_sz:(j + 1)*batch_sz,:] p_y = forward(x, W, b) - gW = gradW(y, p_y, x) / batch_sz - gb = gradb(y, p_y) / batch_sz + current_batch_sz = len(x) + gW = gradW(y, p_y, x) / current_batch_sz + gb = gradb(y, p_y) / current_batch_sz W += lr*(gW - reg*W) b += lr*(gb - reg*b) @@ -144,6 +146,7 @@ def main(): dt2 = dt - last_dt_calculated_loss if dt2 > avg_interval_dt: + last_dt_calculated_loss = dt p_y_test = forward(Xtest, W, b) test_loss = cost(p_y_test, Ytest_ind) test_losses_batch.append([dt, test_loss]) From 04a2c19291f5e6cf488e4e10deb7c73ccdee7bbf Mon Sep 17 00:00:00 2001 From: User Date: Fri, 5 Feb 2021 00:52:22 -0500 Subject: [PATCH 221/329] update --- rl/monte_carlo.py | 10 +++++++-- rl/monte_carlo_es.py | 45 +++++++++++++++++------------------------ rl/monte_carlo_no_es.py | 29 +++++++++++++++----------- 3 files changed, 43 insertions(+), 41 deletions(-) diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index b24dee2f..23cda186 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -10,13 +10,13 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy -SMALL_ENOUGH = 1e-3 +SMALL_ENOUGH = 1e-5 GAMMA = 0.9 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') # NOTE: this is only policy evaluation, not optimization -def play_game(grid, policy): +def play_game(grid, policy, max_steps=20): # returns a list of states and corresponding returns # reset game to start at a random position @@ -28,11 +28,17 @@ def play_game(grid, policy): s = grid.current_state() states_and_rewards = [(s, 0)] # list of tuples of (state, reward) + steps = 0 while not grid.game_over(): a = policy[s] r = grid.move(a) s = grid.current_state() states_and_rewards.append((s, r)) + + steps += 1 + if steps >= max_steps: + break + # calculate the returns by working backwards from the terminal state G = 0 states_and_returns = [] diff --git a/rl/monte_carlo_es.py b/rl/monte_carlo_es.py index 79fca9f1..0926766b 100644 --- a/rl/monte_carlo_es.py +++ b/rl/monte_carlo_es.py @@ -12,12 +12,13 @@ from iterative_policy_evaluation import print_values, print_policy GAMMA = 0.9 +LEARNING_RATE = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') # NOTE: this script implements the Monte Carlo Exploring-Starts method # for finding the optimal policy -def play_game(grid, policy): +def play_game(grid, policy, max_steps=20): # returns a list of states and corresponding returns # reset game to start at a random position @@ -35,31 +36,18 @@ def play_game(grid, policy): # each triple is s(t), a(t), r(t) # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) states_actions_rewards = [(s, a, 0)] - seen_states = set() - seen_states.add(grid.current_state()) - num_steps = 0 - while True: + for _ in range(max_steps): r = grid.move(a) - num_steps += 1 s = grid.current_state() - - if s in seen_states: - # hack so that we don't end up in an infinitely long episode - # bumping into the wall repeatedly - # if num_steps == 1 -> bumped into a wall and haven't moved anywhere - # reward = -10 - # else: - # reward = falls off by 1 / num_steps - reward = -10. 
/ num_steps - states_actions_rewards.append((s, None, reward)) - break - elif grid.game_over(): + + if grid.game_over(): states_actions_rewards.append((s, None, r)) break else: a = policy[s] states_actions_rewards.append((s, a, r)) - seen_states.add(s) + + # seen_states.add(s) # calculate the returns by working backwards from the terminal state G = 0 @@ -93,10 +81,10 @@ def max_dict(d): if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation - # grid = standard_grid() + grid = standard_grid() # try the negative grid too, to see if agent will learn to go past the "bad spot" # in order to minimize number of steps - grid = negative_grid(step_cost=-0.9) + # grid = negative_grid(step_cost=-0.9) # print rewards print("rewards:") @@ -110,22 +98,25 @@ def max_dict(d): # initialize Q(s,a) and returns Q = {} - returns = {} # dictionary of state -> list of returns we've received + sample_counts = {} + # returns = {} # dictionary of state -> list of returns we've received states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} + sample_counts[s] = {} for a in ALL_POSSIBLE_ACTIONS: Q[s][a] = 0 # needs to be initialized to something so we can argmax it - returns[(s,a)] = [] + sample_counts[s][a] = 0 + # returns[(s,a)] = [] else: # terminal state or state we can't otherwise get to pass # repeat until convergence deltas = [] - for t in range(2000): - if t % 100 == 0: + for t in range(10000): + if t % 1000 == 0: print(t) # generate an episode using pi @@ -138,8 +129,8 @@ def max_dict(d): sa = (s, a) if sa not in seen_state_action_pairs: old_q = Q[s][a] - returns[sa].append(G) - Q[s][a] = np.mean(returns[sa]) + # returns[sa].append(G) + Q[s][a] = old_q + LEARNING_RATE * (G - old_q) biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) seen_state_action_pairs.add(sa) deltas.append(biggest_change) diff --git a/rl/monte_carlo_no_es.py b/rl/monte_carlo_no_es.py index e079da38..4515616a 100644 --- a/rl/monte_carlo_no_es.py +++ b/rl/monte_carlo_no_es.py @@ -13,6 +13,7 @@ from monte_carlo_es import max_dict GAMMA = 0.9 +LEARNING_RATE = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') # NOTE: find optimal policy and value function @@ -35,19 +36,19 @@ def random_action(a, eps=0.1): else: return np.random.choice(ALL_POSSIBLE_ACTIONS) -def play_game(grid, policy): +def play_game(grid, policy, eps, max_steps=20): # returns a list of states and corresponding returns # in this version we will NOT use "exploring starts" method # instead we will explore using an epsilon-soft policy s = (2, 0) grid.set_state(s) - a = random_action(policy[s]) + a = random_action(policy[s], eps) # be aware of the timing # each triple is s(t), a(t), r(t) # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) states_actions_rewards = [(s, a, 0)] - while True: + for _ in range(max_steps): r = grid.move(a) s = grid.current_state() if grid.game_over(): @@ -77,10 +78,10 @@ def play_game(grid, policy): if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation - # grid = standard_grid() + grid = standard_grid() # try the negative grid too, to see if agent will learn to go past the "bad spot" # in order to minimize number of steps - grid = negative_grid(step_cost=-0.1) + # grid = negative_grid(step_cost=-0.1) # print rewards print("rewards:") @@ -94,27 +95,28 @@ def play_game(grid, policy): # initialize Q(s,a) and returns Q = {} - 
returns = {} # dictionary of state -> list of returns we've received + # returns = {} # dictionary of state -> list of returns we've received states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} for a in ALL_POSSIBLE_ACTIONS: Q[s][a] = 0 - returns[(s,a)] = [] + # returns[(s,a)] = [] else: # terminal state or state we can't otherwise get to pass # repeat until convergence deltas = [] - for t in range(5000): - if t % 1000 == 0: + eps = 1.0 + for t in range(10000): + if t % 2000 == 0: print(t) # generate an episode using pi biggest_change = 0 - states_actions_returns = play_game(grid, policy) + states_actions_returns = play_game(grid, policy, eps=eps) # calculate Q(s,a) seen_state_action_pairs = set() @@ -124,8 +126,8 @@ def play_game(grid, policy): sa = (s, a) if sa not in seen_state_action_pairs: old_q = Q[s][a] - returns[sa].append(G) - Q[s][a] = np.mean(returns[sa]) + # returns[sa].append(G) + Q[s][a] = old_q + LEARNING_RATE * (G - old_q) biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) seen_state_action_pairs.add(sa) deltas.append(biggest_change) @@ -135,6 +137,9 @@ def play_game(grid, policy): a, _ = max_dict(Q[s]) policy[s] = a + # update epsilon + eps = max(eps - 0.01, 0.1) + plt.plot(deltas) plt.show() From dc7713c5c27fc97714c389541932f261106126fd Mon Sep 17 00:00:00 2001 From: User Date: Fri, 5 Feb 2021 00:58:28 -0500 Subject: [PATCH 222/329] update --- rl/grid_world.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rl/grid_world.py b/rl/grid_world.py index 87955e13..47d36a38 100644 --- a/rl/grid_world.py +++ b/rl/grid_world.py @@ -169,7 +169,8 @@ def move(self, action): next_state_probs = self.probs[(s, a)] next_states = list(next_state_probs.keys()) next_probs = list(next_state_probs.values()) - s2 = np.random.choice(next_states, p=next_probs) + next_state_idx = np.random.choice(len(next_states), p=next_probs) + s2 = next_states[next_state_idx] # update the current state self.i, self.j = s2 From dda1ebceb7f36d9c100bc87f635bd17f42c7de24 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 5 Feb 2021 01:02:19 -0500 Subject: [PATCH 223/329] update --- rl/monte_carlo.py | 2 +- rl/monte_carlo_es.py | 6 ------ rl/monte_carlo_no_es.py | 3 --- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index 23cda186..61532dc3 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -59,7 +59,7 @@ def play_game(grid, policy, max_steps=20): if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation - grid = standard_grid() + # grid = standard_grid() # print rewards print("rewards:") diff --git a/rl/monte_carlo_es.py b/rl/monte_carlo_es.py index 0926766b..7b52c1ee 100644 --- a/rl/monte_carlo_es.py +++ b/rl/monte_carlo_es.py @@ -98,17 +98,12 @@ def max_dict(d): # initialize Q(s,a) and returns Q = {} - sample_counts = {} - # returns = {} # dictionary of state -> list of returns we've received states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} - sample_counts[s] = {} for a in ALL_POSSIBLE_ACTIONS: Q[s][a] = 0 # needs to be initialized to something so we can argmax it - sample_counts[s][a] = 0 - # returns[(s,a)] = [] else: # terminal state or state we can't otherwise get to pass @@ -129,7 +124,6 @@ def max_dict(d): sa = (s, a) if sa not in seen_state_action_pairs: old_q = Q[s][a] - # returns[sa].append(G) Q[s][a] = old_q + LEARNING_RATE * (G - old_q) 
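+          # a constant step size gives an exponentially-weighted average of the sampled returns;
+          # using alpha = 1/N(s,a) instead would reproduce the plain sample mean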
biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) seen_state_action_pairs.add(sa) diff --git a/rl/monte_carlo_no_es.py b/rl/monte_carlo_no_es.py index 4515616a..7da45809 100644 --- a/rl/monte_carlo_no_es.py +++ b/rl/monte_carlo_no_es.py @@ -95,14 +95,12 @@ def play_game(grid, policy, eps, max_steps=20): # initialize Q(s,a) and returns Q = {} - # returns = {} # dictionary of state -> list of returns we've received states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} for a in ALL_POSSIBLE_ACTIONS: Q[s][a] = 0 - # returns[(s,a)] = [] else: # terminal state or state we can't otherwise get to pass @@ -126,7 +124,6 @@ def play_game(grid, policy, eps, max_steps=20): sa = (s, a) if sa not in seen_state_action_pairs: old_q = Q[s][a] - # returns[sa].append(G) Q[s][a] = old_q + LEARNING_RATE * (G - old_q) biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) seen_state_action_pairs.add(sa) From 72f124a7477bfbb7c714c35940724d15f66c2a4b Mon Sep 17 00:00:00 2001 From: User Date: Fri, 5 Feb 2021 01:02:47 -0500 Subject: [PATCH 224/329] update --- rl/monte_carlo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index 61532dc3..23cda186 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -59,7 +59,7 @@ def play_game(grid, policy, max_steps=20): if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation - # grid = standard_grid() + grid = standard_grid() # print rewards print("rewards:") From ab26f33ff418ae09acd69954e3bbbf4f07f9f7d3 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 16 Feb 2021 15:17:25 -0500 Subject: [PATCH 225/329] choose random argmax --- ab_testing/epsilon_greedy.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ab_testing/epsilon_greedy.py b/ab_testing/epsilon_greedy.py index 512c70c2..8d44bd7c 100755 --- a/ab_testing/epsilon_greedy.py +++ b/ab_testing/epsilon_greedy.py @@ -32,7 +32,14 @@ def update(self, x): self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N -def experiment(): +def choose_random_argmax(a): + idx = np.argwhere(np.amax(a) == a).flatten() + return np.random.choice(idx) + + +def experiment(argmax=choose_random_argmax): + # argmax can also simply be np.argmax to choose the first argmax in case of ties + bandits = [BanditArm(p) for p in BANDIT_PROBABILITIES] rewards = np.zeros(NUM_TRIALS) @@ -50,7 +57,7 @@ def experiment(): j = np.random.randint(len(bandits)) else: num_times_exploited += 1 - j = np.argmax([b.p_estimate for b in bandits]) + j = argmax([b.p_estimate for b in bandits]) if j == optimal_j: num_optimal += 1 From 87bde629d25edf4a04647ecbe58e7b06691e5b56 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 16 Feb 2021 15:36:37 -0500 Subject: [PATCH 226/329] update --- ab_testing/epsilon_greedy.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ab_testing/epsilon_greedy.py b/ab_testing/epsilon_greedy.py index 8d44bd7c..b6eeb067 100755 --- a/ab_testing/epsilon_greedy.py +++ b/ab_testing/epsilon_greedy.py @@ -37,9 +37,7 @@ def choose_random_argmax(a): return np.random.choice(idx) -def experiment(argmax=choose_random_argmax): - # argmax can also simply be np.argmax to choose the first argmax in case of ties - +def experiment(): bandits = [BanditArm(p) for p in BANDIT_PROBABILITIES] rewards = np.zeros(NUM_TRIALS) @@ -57,7 +55,7 @@ def experiment(argmax=choose_random_argmax): j = np.random.randint(len(bandits)) else: 
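+      # exploit: pick the arm with the highest current estimate, breaking ties
+      # uniformly at random (e.g. estimates [0.3, 0.7, 0.7] -> choose index 1 or 2)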
num_times_exploited += 1 - j = argmax([b.p_estimate for b in bandits]) + j = choose_random_argmax([b.p_estimate for b in bandits]) if j == optimal_j: num_optimal += 1 From eb09506ba7c98f6e426bc0338848908a1423706c Mon Sep 17 00:00:00 2001 From: User Date: Sun, 21 Feb 2021 14:59:14 -0500 Subject: [PATCH 227/329] update --- rl/policy_iteration.py | 95 ----------------------- rl/policy_iteration_deterministic.py | 16 ++-- rl/policy_iteration_probabilistic.py | 18 +++-- rl/policy_iteration_random.py | 111 --------------------------- 4 files changed, 23 insertions(+), 217 deletions(-) delete mode 100644 rl/policy_iteration.py delete mode 100644 rl/policy_iteration_random.py diff --git a/rl/policy_iteration.py b/rl/policy_iteration.py deleted file mode 100644 index 4709038d..00000000 --- a/rl/policy_iteration.py +++ /dev/null @@ -1,95 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -SMALL_ENOUGH = 1e-3 -GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') - -# this is deterministic -# all p(s',r|s,a) = 1 or 0 - -if __name__ == '__main__': - # this grid gives you a reward of -0.1 for every non-terminal state - # we want to see if this will encourage finding a shorter path to the goal - grid = negative_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # we'll randomly choose an action and update as we learn - policy = {} - for s in grid.actions.keys(): - policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS) - - # initial policy - print("initial policy:") - print_policy(policy, grid) - - # initialize V(s) - V = {} - states = grid.all_states() - for s in states: - # V[s] = 0 - if s in grid.actions: - V[s] = np.random.random() - else: - # terminal state - V[s] = 0 - - # repeat until convergence - will break out when policy does not change - while True: - - # policy evaluation step - we already know how to do this! 
- while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in policy: - a = policy[s] - grid.set_state(s) - r = grid.move(a) - V[s] = r + GAMMA * V[grid.current_state()] - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - - # policy improvement step - is_policy_converged = True - for s in states: - if s in policy: - old_a = policy[s] - new_a = None - best_value = float('-inf') - # loop through all possible actions to find the best current action - for a in ALL_POSSIBLE_ACTIONS: - grid.set_state(s) - r = grid.move(a) - v = r + GAMMA * V[grid.current_state()] - if v > best_value: - best_value = v - new_a = a - policy[s] = new_a - if new_a != old_a: - is_policy_converged = False - - if is_policy_converged: - break - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/policy_iteration_deterministic.py b/rl/policy_iteration_deterministic.py index be552a28..e18e75bf 100644 --- a/rl/policy_iteration_deterministic.py +++ b/rl/policy_iteration_deterministic.py @@ -41,11 +41,16 @@ def get_transition_probs_and_rewards(grid): return transition_probs, rewards -def evaluate_deterministic_policy(grid, policy): +def evaluate_deterministic_policy(grid, policy, initV=None): # initialize V(s) = 0 - V = {} - for s in grid.all_states(): - V[s] = 0 + if initV is None: + V = {} + for s in grid.all_states(): + V[s] = 0 + else: + # it's faster to use the existing V(s) since the value won't change + # that much from one policy to the next + V = initV # repeat until convergence it = 0 @@ -95,10 +100,11 @@ def evaluate_deterministic_policy(grid, policy): print_policy(policy, grid) # repeat until convergence - will break out when policy does not change + V = None while True: # policy evaluation step - we already know how to do this! - V = evaluate_deterministic_policy(grid, policy) + V = evaluate_deterministic_policy(grid, policy, initV=V) # policy improvement step is_policy_converged = True diff --git a/rl/policy_iteration_probabilistic.py b/rl/policy_iteration_probabilistic.py index cbfca297..0468886c 100644 --- a/rl/policy_iteration_probabilistic.py +++ b/rl/policy_iteration_probabilistic.py @@ -36,11 +36,16 @@ def get_transition_probs_and_rewards(grid): return transition_probs, rewards -def evaluate_deterministic_policy(grid, policy): +def evaluate_deterministic_policy(grid, policy, initV=None): # initialize V(s) = 0 - V = {} - for s in grid.all_states(): - V[s] = 0 + if initV is None: + V = {} + for s in grid.all_states(): + V[s] = 0 + else: + # it's faster to use the existing V(s) since the value won't change + # that much from one policy to the next + V = initV # repeat until convergence it = 0 @@ -72,7 +77,7 @@ def evaluate_deterministic_policy(grid, policy): if __name__ == '__main__': - grid = windy_grid_penalized(-2) + grid = windy_grid_penalized(-0.1) # grid = windy_grid() transition_probs, rewards = get_transition_probs_and_rewards(grid) @@ -91,10 +96,11 @@ def evaluate_deterministic_policy(grid, policy): print_policy(policy, grid) # repeat until convergence - will break out when policy does not change + V = None while True: # policy evaluation step - we already know how to do this! 
- V = evaluate_deterministic_policy(grid, policy) + V = evaluate_deterministic_policy(grid, policy, initV=V) # policy improvement step is_policy_converged = True diff --git a/rl/policy_iteration_random.py b/rl/policy_iteration_random.py deleted file mode 100644 index 54faf29e..00000000 --- a/rl/policy_iteration_random.py +++ /dev/null @@ -1,111 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -SMALL_ENOUGH = 1e-3 -GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') - -# next state and reward will now have some randomness -# you'll go in your desired direction with probability 0.5 -# you'll go in a random direction a' != a with probability 0.5/3 - -if __name__ == '__main__': - # this grid gives you a reward of -0.1 for every non-terminal state - # we want to see if this will encourage finding a shorter path to the goal - grid = negative_grid(step_cost=-1.0) - # grid = negative_grid(step_cost=-0.1) - # grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # we'll randomly choose an action and update as we learn - policy = {} - for s in grid.actions.keys(): - policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS) - - # initial policy - print("initial policy:") - print_policy(policy, grid) - - # initialize V(s) - V = {} - states = grid.all_states() - for s in states: - # V[s] = 0 - if s in grid.actions: - V[s] = np.random.random() - else: - # terminal state - V[s] = 0 - - # repeat until convergence - will break out when policy does not change - while True: - - # policy evaluation step - we already know how to do this! 
- while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - new_v = 0 - if s in policy: - for a in ALL_POSSIBLE_ACTIONS: - if a == policy[s]: - p = 0.5 - else: - p = 0.5/3 - grid.set_state(s) - r = grid.move(a) - new_v += p*(r + GAMMA * V[grid.current_state()]) - V[s] = new_v - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - - # policy improvement step - is_policy_converged = True - for s in states: - if s in policy: - old_a = policy[s] - new_a = None - best_value = float('-inf') - # loop through all possible actions to find the best current action - for a in ALL_POSSIBLE_ACTIONS: # chosen action - v = 0 - for a2 in ALL_POSSIBLE_ACTIONS: # resulting action - if a == a2: - p = 0.5 - else: - p = 0.5/3 - grid.set_state(s) - r = grid.move(a2) - v += p*(r + GAMMA * V[grid.current_state()]) - if v > best_value: - best_value = v - new_a = a - policy[s] = new_a - if new_a != old_a: - is_policy_converged = False - - if is_policy_converged: - break - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) - # result: every move is as bad as losing, so lose as quickly as possible From 8335b1504a9f1c11df63be7b7968fb554b517c13 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 24 Mar 2021 19:04:47 -0400 Subject: [PATCH 228/329] update --- rl/monte_carlo2.py | 106 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 rl/monte_carlo2.py diff --git a/rl/monte_carlo2.py b/rl/monte_carlo2.py new file mode 100644 index 00000000..7ca99544 --- /dev/null +++ b/rl/monte_carlo2.py @@ -0,0 +1,106 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +from grid_world import standard_grid, negative_grid +from iterative_policy_evaluation import print_values, print_policy + +GAMMA = 0.9 + +# NOTE: this is only policy evaluation, not optimization + +def play_game(grid, policy, max_steps=20): + # returns a list of states and corresponding returns + + # reset game to start at a random position + # we need to do this, because given our current deterministic policy + # we would never end up at certain states, but we still want to measure their value + start_states = list(grid.actions.keys()) + start_idx = np.random.choice(len(start_states)) + grid.set_state(start_states[start_idx]) + + s = grid.current_state() + + # keep track of all states and rewards encountered + states = [] + rewards = [] + + steps = 0 + while not grid.game_over(): + a = policy[s] + r = grid.move(a) + next_s = grid.current_state() + + # update states and rewards lists + states.append(s) + rewards.append(r) + + steps += 1 + if steps >= max_steps: + break + + # update state + # note: there is no need to store the final terminal state + s = next_s + + return states, rewards + + +if __name__ == '__main__': + # use the standard grid again (0 for every step) so that we can compare + # to iterative policy evaluation + grid = standard_grid() + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + policy = { + (2, 0): 'U', + (1, 0): 'U', + (0, 0): 'R', + (0, 1): 'R', + (0, 2): 'R', + (1, 2): 'R', + (2, 1): 'R', + (2, 
2): 'R', + (2, 3): 'U', + } + + # initialize V(s) and returns + V = {} + returns = {} # dictionary of state -> list of returns we've received + states = grid.all_states() + for s in states: + if s in grid.actions: + returns[s] = [] + else: + # terminal state or state we can't otherwise get to + V[s] = 0 + + # repeat + for t in range(100): + # generate an episode using pi + states, rewards = play_game(grid, policy) + G = 0 + T = len(states) + for t in range(T - 1, -1, -1): + s = states[t] + r = rewards[t] + G = r + GAMMA * G # update return + + # we'll use first-visit Monte Carlo + if s not in states[:t]: + returns[s].append(G) + V[s] = np.mean(returns[s]) + + print("values:") + print_values(V, grid) + print("policy:") + print_policy(policy, grid) From 853be19fd33921867f63ec6e26e0cc171bd558c6 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 24 Mar 2021 19:09:24 -0400 Subject: [PATCH 229/329] comment --- rl/monte_carlo.py | 4 ++++ rl/monte_carlo2.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index 23cda186..d83d236b 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -40,6 +40,10 @@ def play_game(grid, policy, max_steps=20): break # calculate the returns by working backwards from the terminal state + + # we want to return: + # states = [s(0), s(1), ..., s(T-1)] + # returns = [G(0), G(1), ..., G(T-1)] G = 0 states_and_returns = [] first = True diff --git a/rl/monte_carlo2.py b/rl/monte_carlo2.py index 7ca99544..a7a5203d 100644 --- a/rl/monte_carlo2.py +++ b/rl/monte_carlo2.py @@ -48,6 +48,10 @@ def play_game(grid, policy, max_steps=20): # note: there is no need to store the final terminal state s = next_s + # we want to return: + # states = [s(0), s(1), ..., S(T-1)] + # rewards = [R(1), R(2), ..., R(T) ] + return states, rewards From c2f48e93997949e99f5641d98fa045adbbd17aad Mon Sep 17 00:00:00 2001 From: User Date: Sat, 3 Apr 2021 17:44:14 -0400 Subject: [PATCH 230/329] update readme --- README.md | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 212e538d..77185e18 100644 --- a/README.md +++ b/README.md @@ -16,14 +16,27 @@ Why you should not fork this repo I've noticed that many people have out-of-date forks. Thus, I recommend not forking this repository if you take one of my courses. I am constantly updating my courses, and your fork will soon become out-of-date. You should clone the repository instead to make it easy to get updates (i.e. just "git pull" randomly and frequently). +Where is the code for your latest courses? +========================================== + +Beginning with Tensorflow 2, I started to use Google Colab. For those courses, unless otherwise noted, the code will be on Google Colab. Links to the notebooks are provided in the course. See the lecture "Where to get the code" for further details. + + Direct Course Links =================== -PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Aug 2020) +Financial Engineering and Artificial Intelligence in Python (special discount link for full VIP version as of Apr 2021) + +*** Note: if this coupon becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. FINANCEVIP2, FINANCEVIP3, etc.. 
+ +https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP7 + + +PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Apr 2021) -*** Note: if this coupon becomes out of date, check my website for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc.. +*** Note: if this coupon becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc.. -https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP5 +https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP12 Financial Engineering and Artificial Intelligence in Python From 825cc7bdf6b39b49dbd7a8ddbbbcf721742dd56f Mon Sep 17 00:00:00 2001 From: User Date: Sat, 3 Apr 2021 17:46:51 -0400 Subject: [PATCH 231/329] update --- README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 77185e18..116108a3 100644 --- a/README.md +++ b/README.md @@ -22,34 +22,35 @@ Where is the code for your latest courses? Beginning with Tensorflow 2, I started to use Google Colab. For those courses, unless otherwise noted, the code will be on Google Colab. Links to the notebooks are provided in the course. See the lecture "Where to get the code" for further details. -Direct Course Links +VIP Course Links =================== -Financial Engineering and Artificial Intelligence in Python (special discount link for full VIP version as of Apr 2021) +**Financial Engineering and Artificial Intelligence in Python** (special discount link for full VIP version as of Apr 2021) *** Note: if this coupon becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. FINANCEVIP2, FINANCEVIP3, etc.. https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP7 -PyTorch: Deep Learning and Artificial Intelligence (special discount link for full VIP course as of Apr 2021) +**PyTorch: Deep Learning and Artificial Intelligence** (special discount link for full VIP course as of Apr 2021) *** Note: if this coupon becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc.. 
https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP12 -Financial Engineering and Artificial Intelligence in Python -https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP +**Tensorflow 2.0: Deep Learning and Artificial Intelligence** (VIP Content Only) +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 + + +Other Course Links +================== Tensorflow 2.0: Deep Learning and Artificial Intelligence (Main Course - special discount link) https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 -Tensorflow 2.0: Deep Learning and Artificial Intelligence (VIP Content) -https://deeplearningcourses.com/c/deep-learning-tensorflow-2 - Cutting-Edge AI: Deep Reinforcement Learning in Python https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence From 6b988f2ec18aa91a71d2933c9ec56af62d28d037 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 21 Apr 2021 02:08:49 -0400 Subject: [PATCH 232/329] update --- rl/grid_world.py | 12 +++ rl/monte_carlo2.py | 14 ++-- rl/monte_carlo_es.py | 115 ++++++++++++++------------ rl/monte_carlo_no_es.py | 173 +++++++++++++++++++++++----------------- 4 files changed, 180 insertions(+), 134 deletions(-) diff --git a/rl/grid_world.py b/rl/grid_world.py index 47d36a38..ff3c68af 100644 --- a/rl/grid_world.py +++ b/rl/grid_world.py @@ -34,6 +34,12 @@ def current_state(self): def is_terminal(self, s): return s not in self.actions + def reset(self): + # put agent back in start position + self.i = 2 + self.j = 0 + return (self.i, self.j) + def get_next_state(self, s, a): # this answers: where would I end up if I perform action 'a' in state 's'? i, j = s[0], s[1] @@ -249,6 +255,12 @@ def windy_grid(): return g +def windy_grid_no_wind(): + g = windy_grid() + g.probs[((1, 2), 'U')] = {(0, 2): 1.0} + return g + + def windy_grid_penalized(step_cost=-0.1): g = WindyGrid(3, 4, (2, 0)) diff --git a/rl/monte_carlo2.py b/rl/monte_carlo2.py index a7a5203d..9cfb9bde 100644 --- a/rl/monte_carlo2.py +++ b/rl/monte_carlo2.py @@ -27,8 +27,8 @@ def play_game(grid, policy, max_steps=20): s = grid.current_state() # keep track of all states and rewards encountered - states = [] - rewards = [] + states = [s] + rewards = [0] steps = 0 while not grid.game_over(): @@ -37,7 +37,7 @@ def play_game(grid, policy, max_steps=20): next_s = grid.current_state() # update states and rewards lists - states.append(s) + states.append(next_s) rewards.append(r) steps += 1 @@ -49,8 +49,8 @@ def play_game(grid, policy, max_steps=20): s = next_s # we want to return: - # states = [s(0), s(1), ..., S(T-1)] - # rewards = [R(1), R(2), ..., R(T) ] + # states = [s(0), s(1), ..., S(T)] + # rewards = [R(0), R(1), ..., R(T)] return states, rewards @@ -94,9 +94,9 @@ def play_game(grid, policy, max_steps=20): states, rewards = play_game(grid, policy) G = 0 T = len(states) - for t in range(T - 1, -1, -1): + for t in range(T - 2, -1, -1): s = states[t] - r = rewards[t] + r = rewards[t+1] G = r + GAMMA * G # update return # we'll use first-visit Monte Carlo diff --git a/rl/monte_carlo_es.py b/rl/monte_carlo_es.py index 7b52c1ee..8f5f8573 100644 --- a/rl/monte_carlo_es.py +++ b/rl/monte_carlo_es.py @@ -12,15 +12,13 @@ from iterative_policy_evaluation import print_values, print_policy GAMMA = 0.9 -LEARNING_RATE = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') # NOTE: this script implements the Monte Carlo Exploring-Starts method # for finding the optimal policy -def play_game(grid, policy, max_steps=20): - # returns a list of states and 
corresponding returns +def play_game(grid, policy, max_steps=20): # reset game to start at a random position # we need to do this if we have a deterministic policy # we would never end up at certain states, but we still want to measure their value @@ -32,50 +30,48 @@ def play_game(grid, policy, max_steps=20): s = grid.current_state() a = np.random.choice(ALL_POSSIBLE_ACTIONS) # first action is uniformly random - # be aware of the timing - # each triple is s(t), a(t), r(t) - # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) - states_actions_rewards = [(s, a, 0)] + states = [s] + actions = [a] + rewards = [0] + for _ in range(max_steps): r = grid.move(a) s = grid.current_state() + + rewards.append(r) + states.append(s) if grid.game_over(): - states_actions_rewards.append((s, None, r)) break else: a = policy[s] - states_actions_rewards.append((s, a, r)) - - # seen_states.add(s) - - # calculate the returns by working backwards from the terminal state - G = 0 - states_actions_returns = [] - first = True - for s, a, r in reversed(states_actions_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_actions_returns.append((s, a, G)) - G = r + GAMMA*G - states_actions_returns.reverse() # we want it to be in order of state visited - return states_actions_returns + actions.append(a) + + # we want to return: + # states = [s(0), s(1), ..., s(T-1), s(T)] + # actions = [a(0), a(1), ..., a(T-1), ] + # rewards = [ 0, R(1), ..., R(T-1), R(T)] + + return states, actions, rewards def max_dict(d): # returns the argmax (key) and max (value) from a dictionary # put this into a function since we are using it so often - max_key = None - max_val = float('-inf') - for k, v in d.items(): - if v > max_val: - max_val = v - max_key = k - return max_key, max_val + + # find max val + max_val = max(d.values()) + + # find keys corresponding to max val + max_keys = [key for key, val in d.items() if val == max_val] + + ### slow version + # max_keys = [] + # for key, val in d.items(): + # if val == max_val: + # max_keys.append(key) + + return np.random.choice(max_keys), max_val if __name__ == '__main__': @@ -84,7 +80,7 @@ def max_dict(d): grid = standard_grid() # try the negative grid too, to see if agent will learn to go past the "bad spot" # in order to minimize number of steps - # grid = negative_grid(step_cost=-0.9) + # grid = negative_grid(step_cost=-0.1) # print rewards print("rewards:") @@ -98,41 +94,56 @@ def max_dict(d): # initialize Q(s,a) and returns Q = {} + sample_counts = {} states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} + sample_counts[s] = {} for a in ALL_POSSIBLE_ACTIONS: - Q[s][a] = 0 # needs to be initialized to something so we can argmax it + Q[s][a] = 0 + sample_counts[s][a] = 0 else: # terminal state or state we can't otherwise get to pass # repeat until convergence deltas = [] - for t in range(10000): - if t % 1000 == 0: - print(t) + for it in range(10000): + if it % 1000 == 0: + print(it) # generate an episode using pi biggest_change = 0 - states_actions_returns = play_game(grid, policy) - seen_state_action_pairs = set() - for s, a, G in states_actions_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - sa = (s, a) - if sa not in seen_state_action_pairs: + states, actions, rewards = 
play_game(grid, policy) + + # create a list of only state-action pairs for lookup + states_actions = list(zip(states, actions)) + + T = len(states) + G = 0 + for t in range(T - 2, -1, -1): + # retrieve current s, a, r tuple + s = states[t] + a = actions[t] + + # update G + G = rewards[t+1] + GAMMA * G + + # check if we have already seen (s, a) ("first-visit") + if (s, a) not in states_actions[:t]: old_q = Q[s][a] - Q[s][a] = old_q + LEARNING_RATE * (G - old_q) + sample_counts[s][a] += 1 + lr = 1 / sample_counts[s][a] + Q[s][a] = old_q + lr * (G - old_q) + + # update policy + policy[s] = max_dict(Q[s])[0] + + # update delta biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) - seen_state_action_pairs.add(sa) deltas.append(biggest_change) - # update policy - for s in policy.keys(): - policy[s] = max_dict(Q[s])[0] - plt.plot(deltas) plt.show() diff --git a/rl/monte_carlo_no_es.py b/rl/monte_carlo_no_es.py index 7da45809..245ccb2f 100644 --- a/rl/monte_carlo_no_es.py +++ b/rl/monte_carlo_no_es.py @@ -7,72 +7,74 @@ import numpy as np +import pandas as pd import matplotlib.pyplot as plt from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy -from monte_carlo_es import max_dict GAMMA = 0.9 -LEARNING_RATE = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') -# NOTE: find optimal policy and value function -# using on-policy first-visit MC -def random_action(a, eps=0.1): - # choose given a with probability 1 - eps + eps/4 - # choose some other a' != a with probability eps/4 + +def epsilon_greedy(policy, s, eps=0.1): p = np.random.random() - # if p < (1 - eps + eps/len(ALL_POSSIBLE_ACTIONS)): - # return a - # else: - # tmp = list(ALL_POSSIBLE_ACTIONS) - # tmp.remove(a) - # return np.random.choice(tmp) - # - # this is equivalent to the above if p < (1 - eps): - return a + return policy[s] else: return np.random.choice(ALL_POSSIBLE_ACTIONS) -def play_game(grid, policy, eps, max_steps=20): - # returns a list of states and corresponding returns - # in this version we will NOT use "exploring starts" method - # instead we will explore using an epsilon-soft policy - s = (2, 0) - grid.set_state(s) - a = random_action(policy[s], eps) - - # be aware of the timing - # each triple is s(t), a(t), r(t) - # but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t) - states_actions_rewards = [(s, a, 0)] + +def play_game(grid, policy, max_steps=20): + + # start state + s = grid.reset() + + # choose action + a = epsilon_greedy(policy, s) + + states = [s] + actions = [a] + rewards = [0] + for _ in range(max_steps): r = grid.move(a) s = grid.current_state() + + rewards.append(r) + states.append(s) + if grid.game_over(): - states_actions_rewards.append((s, None, r)) break else: - a = random_action(policy[s]) # the next state is stochastic - states_actions_rewards.append((s, a, r)) - - # calculate the returns by working backwards from the terminal state - G = 0 - states_actions_returns = [] - first = True - for s, a, r in reversed(states_actions_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_actions_returns.append((s, a, G)) - G = r + GAMMA*G - states_actions_returns.reverse() # we want it to be in order of state visited - return states_actions_returns + a = epsilon_greedy(policy, s) + actions.append(a) + + # we want to return: + # states 
= [s(0), s(1), ..., s(T-1), s(T)] + # actions = [a(0), a(1), ..., a(T-1), ] + # rewards = [ 0, R(1), ..., R(T-1), R(T)] + + return states, actions, rewards + + +def max_dict(d): + # returns the argmax (key) and max (value) from a dictionary + # put this into a function since we are using it so often + + # find max val + max_val = max(d.values()) + + # find keys corresponding to max val + max_keys = [key for key, val in d.items() if val == max_val] + + ### slow version + # max_keys = [] + # for key, val in d.items(): + # if val == max_val: + # max_keys.append(key) + + return np.random.choice(max_keys), max_val if __name__ == '__main__': @@ -95,59 +97,80 @@ def play_game(grid, policy, eps, max_steps=20): # initialize Q(s,a) and returns Q = {} + sample_counts = {} + state_sample_count = {} states = grid.all_states() for s in states: if s in grid.actions: # not a terminal state Q[s] = {} + sample_counts[s] = {} + state_sample_count[s] = 0 for a in ALL_POSSIBLE_ACTIONS: Q[s][a] = 0 + sample_counts[s][a] = 0 else: # terminal state or state we can't otherwise get to pass # repeat until convergence deltas = [] - eps = 1.0 - for t in range(10000): - if t % 2000 == 0: - print(t) + for it in range(10000): + if it % 1000 == 0: + print(it) # generate an episode using pi biggest_change = 0 - states_actions_returns = play_game(grid, policy, eps=eps) - - # calculate Q(s,a) - seen_state_action_pairs = set() - for s, a, G in states_actions_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - sa = (s, a) - if sa not in seen_state_action_pairs: + states, actions, rewards = play_game(grid, policy) + + # create a list of only state-action pairs for lookup + states_actions = list(zip(states, actions)) + + T = len(states) + G = 0 + for t in range(T - 2, -1, -1): + # retrieve current s, a, r tuple + s = states[t] + a = actions[t] + + # update G + G = rewards[t+1] + GAMMA * G + + # check if we have already seen (s, a) ("first-visit") + if (s, a) not in states_actions[:t]: old_q = Q[s][a] - Q[s][a] = old_q + LEARNING_RATE * (G - old_q) - biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) - seen_state_action_pairs.add(sa) - deltas.append(biggest_change) + sample_counts[s][a] += 1 + lr = 1 / sample_counts[s][a] + Q[s][a] = old_q + lr * (G - old_q) - # calculate new policy pi(s) = argmax[a]{ Q(s,a) } - for s in policy.keys(): - a, _ = max_dict(Q[s]) - policy[s] = a + # update policy + policy[s] = max_dict(Q[s])[0] - # update epsilon - eps = max(eps - 0.01, 0.1) + # update state sample count + state_sample_count[s] += 1 + + # update delta + biggest_change = max(biggest_change, np.abs(old_q - Q[s][a])) + deltas.append(biggest_change) plt.plot(deltas) plt.show() - # find the optimal state-value function - # V(s) = max[a]{ Q(s,a) } + print("final policy:") + print_policy(policy, grid) + + # find V V = {} - for s in policy.keys(): + for s, Qs in Q.items(): V[s] = max_dict(Q[s])[1] print("final values:") print_values(V, grid) - print("final policy:") - print_policy(policy, grid) + print("state_sample_count:") + state_sample_count_arr = np.zeros((grid.rows, grid.cols)) + for i in range(grid.rows): + for j in range(grid.cols): + if (i, j) in state_sample_count: + state_sample_count_arr[i,j] = state_sample_count[(i, j)] + df = pd.DataFrame(state_sample_count_arr) + print(df) From d3804c20030180cdb0af95f2313747b1a1bc4ece Mon Sep 17 00:00:00 2001 From: User Date: Fri, 23 Apr 2021 02:52:18 -0400 Subject: [PATCH 233/329] update --- rl/q_learning.py | 73 
+++++++++++++++----------------------------- rl/sarsa.py | 67 +++++++++++++++------------------------- rl/td0_prediction.py | 72 +++++++++++++++++++++++++------------------ 3 files changed, 92 insertions(+), 120 deletions(-) mode change 100644 => 100755 rl/td0_prediction.py diff --git a/rl/q_learning.py b/rl/q_learning.py index f9109650..47c2cb40 100644 --- a/rl/q_learning.py +++ b/rl/q_learning.py @@ -11,29 +11,21 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy from monte_carlo_es import max_dict -from td0_prediction import random_action GAMMA = 0.9 ALPHA = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') +def epsilon_greedy(Q, s, eps=0.1): + if np.random.random() < eps: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + else: + a_opt = max_dict(Q[s])[0] + return a_opt + + if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. - # # grid = standard_grid() grid = negative_grid(step_cost=-0.1) @@ -41,8 +33,6 @@ print("rewards:") print_values(grid.rewards, grid) - # no policy initialization, we will derive our policy from most recent Q - # initialize Q(s,a) Q = {} states = grid.all_states() @@ -60,52 +50,39 @@ update_counts_sa[s][a] = 1.0 # repeat until convergence - t = 1.0 - deltas = [] + reward_per_episode = [] for it in range(10000): - if it % 100 == 0: - t += 1e-2 if it % 2000 == 0: print("it:", it) - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
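# A rough sketch of the backup the lines added in this hunk perform, written as a
# helper purely for readability. The name q_learning_backup is illustrative only;
# Q, max_dict, ALPHA and GAMMA are the objects already defined in this file.
def q_learning_backup(Q, s, a, r, s2, alpha=ALPHA, gamma=GAMMA):
    # off-policy target: bootstrap from the greedy action in s2,
    # regardless of which action the behaviour policy actually takes next
    max_q_s2 = max_dict(Q[s2])[1]
    Q[s][a] += alpha * (r + gamma * max_q_s2 - Q[s][a])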
- a, _ = max_dict(Q[s]) + # begin a new episode + s = grid.reset() biggest_change = 0 + episode_reward = 0 while not grid.game_over(): - a = random_action(a, eps=0.5/t) # epsilon-greedy - # random action also works, but slower since you can bump into walls - # a = np.random.choice(ALL_POSSIBLE_ACTIONS) + # perform action and get next state + reward + a = epsilon_greedy(Q, s, eps=0.1) r = grid.move(a) s2 = grid.current_state() - # we will update Q(s,a) AS we experience the episode - old_qsa = Q[s][a] - # the difference between SARSA and Q-Learning is with Q-Learning - # we will use this max[a']{ Q(s',a')} in our update - # even if we do not end up taking this action in the next step - a2, max_q_s2a2 = max_dict(Q[s2]) - Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*max_q_s2a2 - Q[s][a]) - biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a])) + # update reward + episode_reward += r + + # update Q(s,a) + maxQ = max_dict(Q[s2])[1] + Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*maxQ - Q[s][a]) # we would like to know how often Q(s) has been updated too update_counts[s] = update_counts.get(s,0) + 1 # next state becomes current state s = s2 - a = a2 - - deltas.append(biggest_change) - plt.plot(deltas) + # log the reward for this episode + reward_per_episode.append(episode_reward) + + plt.plot(reward_per_episode) + plt.title("reward_per_episode") plt.show() # determine the policy from Q* diff --git a/rl/sarsa.py b/rl/sarsa.py index 25c1a94d..470ce6da 100644 --- a/rl/sarsa.py +++ b/rl/sarsa.py @@ -11,29 +11,21 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy from monte_carlo_es import max_dict -from td0_prediction import random_action GAMMA = 0.9 ALPHA = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') +def epsilon_greedy(Q, s, eps=0.1): + if np.random.random() < eps: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + else: + a_opt = max_dict(Q[s])[0] + return a_opt + + if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. - # # grid = standard_grid() grid = negative_grid(step_cost=-0.1) @@ -41,8 +33,6 @@ print("rewards:") print_values(grid.rewards, grid) - # no policy initialization, we will derive our policy from most recent Q - # initialize Q(s,a) Q = {} states = grid.all_states() @@ -60,40 +50,29 @@ update_counts_sa[s][a] = 1.0 # repeat until convergence - t = 1.0 - deltas = [] + reward_per_episode = [] for it in range(10000): - if it % 100 == 0: - t += 1e-2 if it % 2000 == 0: print("it:", it) - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
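# A rough sketch of the on-policy backup the lines added in this hunk perform,
# written as a helper purely for readability. The name sarsa_backup is illustrative
# only; Q, epsilon_greedy, ALPHA and GAMMA are defined in this file. Unlike
# Q-learning, the bootstrap uses Q(s2, a2) for the action actually selected next.
def sarsa_backup(Q, s, a, r, s2, alpha=ALPHA, gamma=GAMMA, eps=0.1):
    a2 = epsilon_greedy(Q, s2, eps=eps)   # next action chosen by the behaviour policy
    Q[s][a] += alpha * (r + gamma * Q[s2][a2] - Q[s][a])
    return a2                             # reused as the action taken on the next step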
- a = max_dict(Q[s])[0] - a = random_action(a, eps=0.5/t) + # begin a new episode + s = grid.reset() + a = epsilon_greedy(Q, s, eps=0.1) biggest_change = 0 + episode_reward = 0 while not grid.game_over(): + # perform action and get next state + reward r = grid.move(a) s2 = grid.current_state() - # we need the next action as well since Q(s,a) depends on Q(s',a') - # if s2 not in policy then it's a terminal state, all Q are 0 - a2 = max_dict(Q[s2])[0] - a2 = random_action(a2, eps=0.5/t) # epsilon-greedy + # update reward + episode_reward += r + + # get next action + a2 = epsilon_greedy(Q, s2, eps=0.1) - # we will update Q(s,a) AS we experience the episode - old_qsa = Q[s][a] + # update Q(s,a) Q[s][a] = Q[s][a] + ALPHA*(r + GAMMA*Q[s2][a2] - Q[s][a]) - biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a])) # we would like to know how often Q(s) has been updated too update_counts[s] = update_counts.get(s,0) + 1 @@ -102,9 +81,11 @@ s = s2 a = a2 - deltas.append(biggest_change) + # log the reward for this episode + reward_per_episode.append(episode_reward) - plt.plot(deltas) + plt.plot(reward_per_episode) + plt.title("reward_per_episode") plt.show() # determine the policy from Q* diff --git a/rl/td0_prediction.py b/rl/td0_prediction.py old mode 100644 new mode 100755 index 08b9b239..e76dac78 --- a/rl/td0_prediction.py +++ b/rl/td0_prediction.py @@ -18,28 +18,28 @@ # NOTE: this is only policy evaluation, not optimization -def random_action(a, eps=0.1): +def epsilon_greedy(policy, s, eps=0.1): # we'll use epsilon-soft to ensure all states are visited # what happens if you don't do this? i.e. eps=0 p = np.random.random() if p < (1 - eps): - return a + return policy[s] else: return np.random.choice(ALL_POSSIBLE_ACTIONS) -def play_game(grid, policy): - # returns a list of states and corresponding rewards (not returns as in MC) - # start at the designated start state - s = (2, 0) - grid.set_state(s) - states_and_rewards = [(s, 0)] # list of tuples of (state, reward) - while not grid.game_over(): - a = policy[s] - a = random_action(a) - r = grid.move(a) - s = grid.current_state() - states_and_rewards.append((s, r)) - return states_and_rewards +# def play_game(grid, policy): +# # returns a list of states and corresponding rewards (not returns as in MC) +# # start at the designated start state +# s = (2, 0) +# grid.set_state(s) +# states_and_rewards = [(s, 0)] # list of tuples of (state, reward) +# while not grid.game_over(): +# a = policy[s] +# a = random_action(a) +# r = grid.move(a) +# s = grid.current_state() +# states_and_rewards.append((s, r)) +# return states_and_rewards if __name__ == '__main__': @@ -70,21 +70,35 @@ def play_game(grid, policy): for s in states: V[s] = 0 + # store max change in V(s) per episode + deltas = [] + # repeat until convergence - for it in range(1000): - - # generate an episode using pi - states_and_rewards = play_game(grid, policy) - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
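# A rough sketch of the online TD(0) backup the lines added in this hunk perform,
# written as a helper purely for readability. The name td0_backup is illustrative
# only; V, ALPHA and GAMMA are defined in this file.
def td0_backup(V, s, r, s_next, alpha=ALPHA, gamma=GAMMA):
    v_old = V[s]
    V[s] = V[s] + alpha * (r + gamma * V[s_next] - V[s])
    return np.abs(V[s] - v_old)   # per-step change, the quantity tracked in deltas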
- for t in range(len(states_and_rewards) - 1): - s, _ = states_and_rewards[t] - s2, r = states_and_rewards[t+1] - # we will update V(s) AS we experience the episode - V[s] = V[s] + ALPHA*(r + GAMMA*V[s2] - V[s]) + n_episodes = 10000 + for it in range(n_episodes): + # begin a new episode + s = grid.reset() + + delta = 0 + while not grid.game_over(): + a = epsilon_greedy(policy, s) + + r = grid.move(a) + s_next = grid.current_state() + + # update V(s) + v_old = V[s] + V[s] = V[s] + ALPHA*(r + GAMMA*V[s_next] - V[s]) + delta = max(delta, np.abs(V[s] - v_old)) + + # next state becomes current state + s = s_next + + # store delta + deltas.append(delta) + + plt.plot(deltas) + plt.show() print("values:") print_values(V, grid) From b39d7c5747219fb94e39af64fc613529b0cec8a4 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 23 Apr 2021 02:52:50 -0400 Subject: [PATCH 234/329] update --- rl/td0_prediction.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/rl/td0_prediction.py b/rl/td0_prediction.py index e76dac78..98101eff 100755 --- a/rl/td0_prediction.py +++ b/rl/td0_prediction.py @@ -16,7 +16,6 @@ ALPHA = 0.1 ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') -# NOTE: this is only policy evaluation, not optimization def epsilon_greedy(policy, s, eps=0.1): # we'll use epsilon-soft to ensure all states are visited @@ -27,20 +26,6 @@ def epsilon_greedy(policy, s, eps=0.1): else: return np.random.choice(ALL_POSSIBLE_ACTIONS) -# def play_game(grid, policy): -# # returns a list of states and corresponding rewards (not returns as in MC) -# # start at the designated start state -# s = (2, 0) -# grid.set_state(s) -# states_and_rewards = [(s, 0)] # list of tuples of (state, reward) -# while not grid.game_over(): -# a = policy[s] -# a = random_action(a) -# r = grid.move(a) -# s = grid.current_state() -# states_and_rewards.append((s, r)) -# return states_and_rewards - if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare From 225884e365f0a03667ae9c1ece44d32957819dcb Mon Sep 17 00:00:00 2001 From: User Date: Fri, 23 Apr 2021 17:41:26 -0400 Subject: [PATCH 235/329] update --- rl/q_learning.py | 5 ----- rl/sarsa.py | 5 ----- 2 files changed, 10 deletions(-) diff --git a/rl/q_learning.py b/rl/q_learning.py index 47c2cb40..69ab2c5d 100644 --- a/rl/q_learning.py +++ b/rl/q_learning.py @@ -43,11 +43,6 @@ def epsilon_greedy(Q, s, eps=0.1): # let's also keep track of how many times Q[s] has been updated update_counts = {} - update_counts_sa = {} - for s in states: - update_counts_sa[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - update_counts_sa[s][a] = 1.0 # repeat until convergence reward_per_episode = [] diff --git a/rl/sarsa.py b/rl/sarsa.py index 470ce6da..08e94513 100644 --- a/rl/sarsa.py +++ b/rl/sarsa.py @@ -43,11 +43,6 @@ def epsilon_greedy(Q, s, eps=0.1): # let's also keep track of how many times Q[s] has been updated update_counts = {} - update_counts_sa = {} - for s in states: - update_counts_sa[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - update_counts_sa[s][a] = 1.0 # repeat until convergence reward_per_episode = [] From 0c29b73eb2ed82d382bc5df70428b89add9b9edd Mon Sep 17 00:00:00 2001 From: User Date: Fri, 23 Apr 2021 17:55:10 -0400 Subject: [PATCH 236/329] update --- rl/q_learning.py | 1 - rl/sarsa.py | 1 - 2 files changed, 2 deletions(-) diff --git a/rl/q_learning.py b/rl/q_learning.py index 69ab2c5d..d4c3c145 100644 --- a/rl/q_learning.py +++ b/rl/q_learning.py @@ -52,7 +52,6 @@ def epsilon_greedy(Q, s, eps=0.1): # begin a new episode s = grid.reset() - 
biggest_change = 0 episode_reward = 0 while not grid.game_over(): # perform action and get next state + reward diff --git a/rl/sarsa.py b/rl/sarsa.py index 08e94513..5e1b9d5a 100644 --- a/rl/sarsa.py +++ b/rl/sarsa.py @@ -53,7 +53,6 @@ def epsilon_greedy(Q, s, eps=0.1): # begin a new episode s = grid.reset() a = epsilon_greedy(Q, s, eps=0.1) - biggest_change = 0 episode_reward = 0 while not grid.game_over(): # perform action and get next state + reward From 886fc915902d1fc45a675afaa907036f1c05cf4f Mon Sep 17 00:00:00 2001 From: User Date: Mon, 3 May 2021 00:04:29 -0400 Subject: [PATCH 237/329] update --- rl/monte_carlo.py | 57 +++++++++++------------ rl/monte_carlo2.py | 110 --------------------------------------------- 2 files changed, 27 insertions(+), 140 deletions(-) delete mode 100644 rl/monte_carlo2.py diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index d83d236b..9cfb9bde 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -10,9 +10,7 @@ from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy -SMALL_ENOUGH = 1e-5 GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') # NOTE: this is only policy evaluation, not optimization @@ -27,37 +25,34 @@ def play_game(grid, policy, max_steps=20): grid.set_state(start_states[start_idx]) s = grid.current_state() - states_and_rewards = [(s, 0)] # list of tuples of (state, reward) + + # keep track of all states and rewards encountered + states = [s] + rewards = [0] + steps = 0 while not grid.game_over(): a = policy[s] r = grid.move(a) - s = grid.current_state() - states_and_rewards.append((s, r)) + next_s = grid.current_state() + + # update states and rewards lists + states.append(next_s) + rewards.append(r) steps += 1 if steps >= max_steps: break - # calculate the returns by working backwards from the terminal state + # update state + # note: there is no need to store the final terminal state + s = next_s # we want to return: - # states = [s(0), s(1), ..., s(T-1)] - # returns = [G(0), G(1), ..., G(T-1)] - G = 0 - states_and_returns = [] - first = True - for s, r in reversed(states_and_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_and_returns.append((s, G)) - G = r + GAMMA*G - states_and_returns.reverse() # we want it to be in order of state visited - return states_and_returns + # states = [s(0), s(1), ..., S(T)] + # rewards = [R(0), R(1), ..., R(T)] + + return states, rewards if __name__ == '__main__': @@ -95,17 +90,19 @@ def play_game(grid, policy, max_steps=20): # repeat for t in range(100): - # generate an episode using pi - states_and_returns = play_game(grid, policy) - seen_states = set() - for s, G in states_and_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - if s not in seen_states: + states, rewards = play_game(grid, policy) + G = 0 + T = len(states) + for t in range(T - 2, -1, -1): + s = states[t] + r = rewards[t+1] + G = r + GAMMA * G # update return + + # we'll use first-visit Monte Carlo + if s not in states[:t]: returns[s].append(G) V[s] = np.mean(returns[s]) - seen_states.add(s) print("values:") print_values(V, grid) diff --git a/rl/monte_carlo2.py b/rl/monte_carlo2.py deleted file mode 100644 index 9cfb9bde..00000000 --- a/rl/monte_carlo2.py +++ /dev/null @@ -1,110 +0,0 @@ -# 
https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -GAMMA = 0.9 - -# NOTE: this is only policy evaluation, not optimization - -def play_game(grid, policy, max_steps=20): - # returns a list of states and corresponding returns - - # reset game to start at a random position - # we need to do this, because given our current deterministic policy - # we would never end up at certain states, but we still want to measure their value - start_states = list(grid.actions.keys()) - start_idx = np.random.choice(len(start_states)) - grid.set_state(start_states[start_idx]) - - s = grid.current_state() - - # keep track of all states and rewards encountered - states = [s] - rewards = [0] - - steps = 0 - while not grid.game_over(): - a = policy[s] - r = grid.move(a) - next_s = grid.current_state() - - # update states and rewards lists - states.append(next_s) - rewards.append(r) - - steps += 1 - if steps >= max_steps: - break - - # update state - # note: there is no need to store the final terminal state - s = next_s - - # we want to return: - # states = [s(0), s(1), ..., S(T)] - # rewards = [R(0), R(1), ..., R(T)] - - return states, rewards - - -if __name__ == '__main__': - # use the standard grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'R', - (2, 1): 'R', - (2, 2): 'R', - (2, 3): 'U', - } - - # initialize V(s) and returns - V = {} - returns = {} # dictionary of state -> list of returns we've received - states = grid.all_states() - for s in states: - if s in grid.actions: - returns[s] = [] - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - # repeat - for t in range(100): - # generate an episode using pi - states, rewards = play_game(grid, policy) - G = 0 - T = len(states) - for t in range(T - 2, -1, -1): - s = states[t] - r = rewards[t+1] - G = r + GAMMA * G # update return - - # we'll use first-visit Monte Carlo - if s not in states[:t]: - returns[s].append(G) - V[s] = np.mean(returns[s]) - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) From b27c0127dff555524d3b694a22cb59e7e7ba235f Mon Sep 17 00:00:00 2001 From: User Date: Thu, 6 May 2021 20:54:44 -0400 Subject: [PATCH 238/329] update --- ab_testing/bayesian_normal.py | 4 +--- rl/bayesian_normal.py | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/ab_testing/bayesian_normal.py b/ab_testing/bayesian_normal.py index 4305e0f7..27ad6808 100644 --- a/ab_testing/bayesian_normal.py +++ b/ab_testing/bayesian_normal.py @@ -21,7 +21,6 @@ def __init__(self, true_mean): # parameters for mu - prior is N(0,1) self.m = 0 self.lambda_ = 1 - self.sum_x = 0 # for convenience self.tau = 1 self.N = 0 @@ -32,9 +31,8 @@ def sample(self): return np.random.randn() / np.sqrt(self.lambda_) + self.m def update(self, x): + self.m = (self.tau*x + self.lambda_ * self.m) / (self.tau + self.lambda_) self.lambda_ += 
self.tau - self.sum_x += x - self.m = self.tau*self.sum_x / self.lambda_ self.N += 1 diff --git a/rl/bayesian_normal.py b/rl/bayesian_normal.py index 4305e0f7..27ad6808 100644 --- a/rl/bayesian_normal.py +++ b/rl/bayesian_normal.py @@ -21,7 +21,6 @@ def __init__(self, true_mean): # parameters for mu - prior is N(0,1) self.m = 0 self.lambda_ = 1 - self.sum_x = 0 # for convenience self.tau = 1 self.N = 0 @@ -32,9 +31,8 @@ def sample(self): return np.random.randn() / np.sqrt(self.lambda_) + self.m def update(self, x): + self.m = (self.tau*x + self.lambda_ * self.m) / (self.tau + self.lambda_) self.lambda_ += self.tau - self.sum_x += x - self.m = self.tau*self.sum_x / self.lambda_ self.N += 1 From f7258f4415779585990146daca0f6d1bc1491462 Mon Sep 17 00:00:00 2001 From: User Date: Thu, 6 May 2021 20:55:31 -0400 Subject: [PATCH 239/329] update --- ab_testing/bayesian_normal.py | 2 +- rl/bayesian_normal.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ab_testing/bayesian_normal.py b/ab_testing/bayesian_normal.py index 27ad6808..07083d1b 100644 --- a/ab_testing/bayesian_normal.py +++ b/ab_testing/bayesian_normal.py @@ -31,7 +31,7 @@ def sample(self): return np.random.randn() / np.sqrt(self.lambda_) + self.m def update(self, x): - self.m = (self.tau*x + self.lambda_ * self.m) / (self.tau + self.lambda_) + self.m = (self.tau * x + self.lambda_ * self.m) / (self.tau + self.lambda_) self.lambda_ += self.tau self.N += 1 diff --git a/rl/bayesian_normal.py b/rl/bayesian_normal.py index 27ad6808..07083d1b 100644 --- a/rl/bayesian_normal.py +++ b/rl/bayesian_normal.py @@ -31,7 +31,7 @@ def sample(self): return np.random.randn() / np.sqrt(self.lambda_) + self.m def update(self, x): - self.m = (self.tau*x + self.lambda_ * self.m) / (self.tau + self.lambda_) + self.m = (self.tau * x + self.lambda_ * self.m) / (self.tau + self.lambda_) self.lambda_ += self.tau self.N += 1 From eb22d801fee70423db06b62570deec52c88bcc3f Mon Sep 17 00:00:00 2001 From: User Date: Mon, 10 May 2021 17:15:46 -0400 Subject: [PATCH 240/329] update --- tf2.0/exercises.txt | 35 +++++++++++++++++++++++++++++++++++ tf2.0/extra_reading.txt | 5 ++++- 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 tf2.0/exercises.txt diff --git a/tf2.0/exercises.txt b/tf2.0/exercises.txt new file mode 100644 index 00000000..a12575a3 --- /dev/null +++ b/tf2.0/exercises.txt @@ -0,0 +1,35 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +CNN +https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge +https://lazyprogrammer.me/course_files/fer2013.csv + +RNN +Find your own stock price dataset! + +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv + +Recommender Systems +http://www2.informatik.uni-freiburg.de/~cziegler/BX/ +http://lazyprogrammer.me/course_files/exercises/BX-CSV-Dump.zip + +Transfer Learning +https://www.kaggle.com/c/dogs-vs-cats + +GAN +https://www.kaggle.com/c/dogs-vs-cats + +DeepRL +Find your own stock price dataset! 
\ No newline at end of file diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt index 1542aa56..7d5afcf1 100644 --- a/tf2.0/extra_reading.txt +++ b/tf2.0/extra_reading.txt @@ -21,4 +21,7 @@ Massive Exploration of Neural Machine Translation Architectures https://arxiv.org/abs/1703.03906 Practical Deep Reinforcement Learning Approach for Stock Trading -https://arxiv.org/abs/1811.07522 \ No newline at end of file +https://arxiv.org/abs/1811.07522 + +Inceptionism: Going Deeper into Neural Networks +https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html \ No newline at end of file From 96aa4fd4a9384856d7293760f84d7d82315b62db Mon Sep 17 00:00:00 2001 From: User Date: Mon, 10 May 2021 17:16:18 -0400 Subject: [PATCH 241/329] update --- pytorch/exercises.txt | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 pytorch/exercises.txt diff --git a/pytorch/exercises.txt b/pytorch/exercises.txt new file mode 100644 index 00000000..a12575a3 --- /dev/null +++ b/pytorch/exercises.txt @@ -0,0 +1,35 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +CNN +https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge +https://lazyprogrammer.me/course_files/fer2013.csv + +RNN +Find your own stock price dataset! + +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv + +Recommender Systems +http://www2.informatik.uni-freiburg.de/~cziegler/BX/ +http://lazyprogrammer.me/course_files/exercises/BX-CSV-Dump.zip + +Transfer Learning +https://www.kaggle.com/c/dogs-vs-cats + +GAN +https://www.kaggle.com/c/dogs-vs-cats + +DeepRL +Find your own stock price dataset! 
\ No newline at end of file From 802e81e38038aaa873a5a8019813256d9fe4ee3d Mon Sep 17 00:00:00 2001 From: User Date: Mon, 10 May 2021 17:17:35 -0400 Subject: [PATCH 242/329] update --- pytorch/exercises.txt | 3 ++- tf2.0/exercises.txt | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pytorch/exercises.txt b/pytorch/exercises.txt index a12575a3..aa364191 100644 --- a/pytorch/exercises.txt +++ b/pytorch/exercises.txt @@ -7,7 +7,8 @@ https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html https://lazyprogrammer.me/course_files/exercises/boston.txt ANN -https://archive.ics.uci.edu/ml/datasets/ecoli +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) https://lazyprogrammer.me/course_files/exercises/ecoli.csv CNN diff --git a/tf2.0/exercises.txt b/tf2.0/exercises.txt index a12575a3..aa364191 100644 --- a/tf2.0/exercises.txt +++ b/tf2.0/exercises.txt @@ -7,7 +7,8 @@ https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html https://lazyprogrammer.me/course_files/exercises/boston.txt ANN -https://archive.ics.uci.edu/ml/datasets/ecoli +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) https://lazyprogrammer.me/course_files/exercises/ecoli.csv CNN From f676a39d949505d4c109c79a076e46361455da15 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 15 May 2021 14:48:12 -0400 Subject: [PATCH 243/329] update --- rl/approx_control.py | 162 ++++++++++++++++++++++++++++++++++++++++ rl/approx_prediction.py | 144 +++++++++++++++++++++++++++++++++++ rl/cartpole.py | 153 +++++++++++++++++++++++++++++++++++++ 3 files changed, 459 insertions(+) create mode 100644 rl/approx_control.py create mode 100644 rl/approx_prediction.py create mode 100644 rl/cartpole.py diff --git a/rl/approx_control.py b/rl/approx_control.py new file mode 100644 index 00000000..ba19e58f --- /dev/null +++ b/rl/approx_control.py @@ -0,0 +1,162 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from grid_world import standard_grid, negative_grid +from iterative_policy_evaluation import print_values, print_policy +from sklearn.kernel_approximation import Nystroem, RBFSampler + +GAMMA = 0.9 +ALPHA = 0.1 +ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') +ACTION2INT = {a: i for i, a in enumerate(ALL_POSSIBLE_ACTIONS)} +INT2ONEHOT = np.eye(len(ALL_POSSIBLE_ACTIONS)) + + +def epsilon_greedy(model, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. 
eps=0 + p = np.random.random() + if p < (1 - eps): + values = model.predict_all_actions(s) + return ALL_POSSIBLE_ACTIONS[np.argmax(values)] + else: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + + +def one_hot(k): + return INT2ONEHOT[k] + + +def merge_state_action(s, a): + ai = one_hot(ACTION2INT[a]) + return np.concatenate((s, ai)) + + +def gather_samples(grid, n_episodes=1000): + samples = [] + for _ in range(n_episodes): + s = grid.reset() + while not grid.game_over(): + a = np.random.choice(ALL_POSSIBLE_ACTIONS) + sa = merge_state_action(s, a) + samples.append(sa) + + r = grid.move(a) + s = grid.current_state() + return samples + + +class Model: + def __init__(self, grid): + # fit the featurizer to data + samples = gather_samples(grid) + # self.featurizer = Nystroem() + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s, a): + sa = merge_state_action(s, a) + x = self.featurizer.transform([sa])[0] + return x @ self.w + + def predict_all_actions(self, s): + return [self.predict(s, a) for a in ALL_POSSIBLE_ACTIONS] + + def grad(self, s, a): + sa = merge_state_action(s, a) + x = self.featurizer.transform([sa])[0] + return x + + +if __name__ == '__main__': + # use the standard grid again (0 for every step) so that we can compare + # to iterative policy evaluation + # grid = standard_grid() + grid = negative_grid(step_cost=-0.1) + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + model = Model(grid) + reward_per_episode = [] + state_visit_count = {} + + # repeat until convergence + n_episodes = 20000 + for it in range(n_episodes): + if (it + 1) % 100 == 0: + print(it + 1) + + s = grid.reset() + state_visit_count[s] = state_visit_count.get(s, 0) + 1 + episode_reward = 0 + while not grid.game_over(): + a = epsilon_greedy(model, s) + r = grid.move(a) + s2 = grid.current_state() + state_visit_count[s2] = state_visit_count.get(s2, 0) + 1 + + # get the target + if grid.game_over(): + target = r + else: + values = model.predict_all_actions(s2) + target = r + GAMMA * np.max(values) + + # update the model + g = model.grad(s, a) + err = target - model.predict(s, a) + model.w += ALPHA * err * g + + # accumulate reward + episode_reward += r + + # update state + s = s2 + + reward_per_episode.append(episode_reward) + + plt.plot(reward_per_episode) + plt.title("Reward per episode") + plt.show() + + # obtain V* and pi* + V = {} + greedy_policy = {} + states = grid.all_states() + for s in states: + if s in grid.actions: + values = model.predict_all_actions(s) + V[s] = np.max(values) + greedy_policy[s] = ALL_POSSIBLE_ACTIONS[np.argmax(values)] + else: + # terminal state or state we can't otherwise get to + V[s] = 0 + + print("values:") + print_values(V, grid) + print("policy:") + print_policy(greedy_policy, grid) + + + print("state_visit_count:") + state_sample_count_arr = np.zeros((grid.rows, grid.cols)) + for i in range(grid.rows): + for j in range(grid.cols): + if (i, j) in state_visit_count: + state_sample_count_arr[i,j] = state_visit_count[(i, j)] + df = pd.DataFrame(state_sample_count_arr) + print(df) diff --git a/rl/approx_prediction.py b/rl/approx_prediction.py new file mode 100644 index 00000000..4e75d9fc --- /dev/null +++ b/rl/approx_prediction.py @@ -0,0 +1,144 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# 
https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + + +import numpy as np +import matplotlib.pyplot as plt +from grid_world import standard_grid, negative_grid +from iterative_policy_evaluation import print_values, print_policy +from sklearn.kernel_approximation import Nystroem, RBFSampler + +GAMMA = 0.9 +ALPHA = 0.01 +ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') + + +def epsilon_greedy(greedy, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. eps=0 + p = np.random.random() + if p < (1 - eps): + return greedy[s] + else: + return np.random.choice(ALL_POSSIBLE_ACTIONS) + + +def gather_samples(grid, n_episodes=10000): + samples = [] + for _ in range(n_episodes): + s = grid.reset() + samples.append(s) + while not grid.game_over(): + a = np.random.choice(ALL_POSSIBLE_ACTIONS) + r = grid.move(a) + s = grid.current_state() + samples.append(s) + return samples + + +class Model: + def __init__(self, grid): + # fit the featurizer to data + samples = gather_samples(grid) + # self.featurizer = Nystroem() + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s): + x = self.featurizer.transform([s])[0] + return x @ self.w + + def grad(self, s): + x = self.featurizer.transform([s])[0] + return x + + +if __name__ == '__main__': + # use the standard grid again (0 for every step) so that we can compare + # to iterative policy evaluation + grid = standard_grid() + + # print rewards + print("rewards:") + print_values(grid.rewards, grid) + + # state -> action + greedy_policy = { + (2, 0): 'U', + (1, 0): 'U', + (0, 0): 'R', + (0, 1): 'R', + (0, 2): 'R', + (1, 2): 'R', + (2, 1): 'R', + (2, 2): 'R', + (2, 3): 'U', + } + + model = Model(grid) + mse_per_episode = [] + + # repeat until convergence + n_episodes = 10000 + for it in range(n_episodes): + if (it + 1) % 100 == 0: + print(it + 1) + + s = grid.reset() + Vs = model.predict(s) + n_steps = 0 + episode_err = 0 + while not grid.game_over(): + a = epsilon_greedy(greedy_policy, s) + r = grid.move(a) + s2 = grid.current_state() + + # get the target + if grid.is_terminal(s2): + target = r + else: + Vs2 = model.predict(s2) + target = r + GAMMA * Vs2 + + # update the model + g = model.grad(s) + err = target - Vs + model.w += ALPHA * err * g + + # accumulate error + n_steps += 1 + episode_err += err*err + + # update state + s = s2 + Vs = Vs2 + + mse = episode_err / n_steps + mse_per_episode.append(mse) + + plt.plot(mse_per_episode) + plt.title("MSE per episode") + plt.show() + + # obtain predicted values + V = {} + states = grid.all_states() + for s in states: + if s in grid.actions: + V[s] = model.predict(s) + else: + # terminal state or state we can't otherwise get to + V[s] = 0 + + print("values:") + print_values(V, grid) + print("policy:") + print_policy(greedy_policy, grid) diff --git a/rl/cartpole.py b/rl/cartpole.py new file mode 100644 index 00000000..2ef157b5 --- /dev/null +++ b/rl/cartpole.py @@ -0,0 +1,153 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may 
need to update your version of future +# sudo pip install -U future + +import gym +import numpy as np +import matplotlib.pyplot as plt +from sklearn.kernel_approximation import RBFSampler + + +GAMMA = 0.99 +ALPHA = 0.1 + + +def epsilon_greedy(model, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. eps=0 + p = np.random.random() + if p < (1 - eps): + values = model.predict_all_actions(s) + return np.argmax(values) + else: + return model.env.action_space.sample() + + +def gather_samples(env, n_episodes=10000): + samples = [] + for _ in range(n_episodes): + s = env.reset() + done = False + while not done: + a = env.action_space.sample() + sa = np.concatenate((s, [a])) + samples.append(sa) + + s, r, done, info = env.step(a) + return samples + + +class Model: + def __init__(self, env): + # fit the featurizer to data + self.env = env + samples = gather_samples(env) + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x @ self.w + + def predict_all_actions(self, s): + return [self.predict(s, a) for a in range(self.env.action_space.n)] + + def grad(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x + + +def test_agent(model, env, n_episodes=20): + reward_per_episode = np.zeros(n_episodes) + for it in range(n_episodes): + done = False + episode_reward = 0 + s = env.reset() + while not done: + a = epsilon_greedy(model, s, eps=0) + s, r, done, info = env.step(a) + episode_reward += r + reward_per_episode[it] = episode_reward + return np.mean(reward_per_episode) + + +def watch_agent(model, env, eps): + done = False + episode_reward = 0 + s = env.reset() + while not done: + a = epsilon_greedy(model, s, eps=eps) + s, r, done, info = env.step(a) + env.render() + episode_reward += r + print("Episode reward:", episode_reward) + + +if __name__ == '__main__': + # instantiate environment + env = gym.make("CartPole-v0") + + model = Model(env) + reward_per_episode = [] + + # watch untrained agent + watch_agent(model, env, eps=0) + + # repeat until convergence + n_episodes = 1500 + for it in range(n_episodes): + s = env.reset() + episode_reward = 0 + done = False + while not done: + a = epsilon_greedy(model, s) + s2, r, done, info = env.step(a) + + # get the target + if done: + target = r + else: + values = model.predict_all_actions(s2) + target = r + GAMMA * np.max(values) + + # update the model + g = model.grad(s, a) + err = target - model.predict(s, a) + model.w += ALPHA * err * g + + # accumulate reward + episode_reward += r + + # update state + s = s2 + + if (it + 1) % 50 == 0: + print(f"Episode: {it + 1}, Reward: {episode_reward}") + + # early exit + if it > 20 and np.mean(reward_per_episode[-20:]) == 200: + print("Early exit") + break + + reward_per_episode.append(episode_reward) + + # test trained agent + test_reward = test_agent(model, env) + print(f"Average test reward: {test_reward}") + + plt.plot(reward_per_episode) + plt.title("Reward per episode") + plt.show() + + # watch trained agent + watch_agent(model, env, eps=0) + From 57f3464489781ba2580de20e539ea4bbf3cdef5e Mon Sep 17 00:00:00 2001 From: User Date: Sat, 15 May 2021 14:50:23 -0400 Subject: [PATCH 244/329] update --- rl/approx_mc_prediction.py | 106 ------------- rl/approx_q_learning.py | 190 
----------------------- rl/approx_semigradient_sarsa_control.py | 190 ----------------------- rl/approx_semigradient_td0_prediction.py | 101 ------------ 4 files changed, 587 deletions(-) delete mode 100644 rl/approx_mc_prediction.py delete mode 100644 rl/approx_q_learning.py delete mode 100644 rl/approx_semigradient_sarsa_control.py delete mode 100644 rl/approx_semigradient_td0_prediction.py diff --git a/rl/approx_mc_prediction.py b/rl/approx_mc_prediction.py deleted file mode 100644 index 91d649bb..00000000 --- a/rl/approx_mc_prediction.py +++ /dev/null @@ -1,106 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -# NOTE: this is only policy evaluation, not optimization - -# we'll try to obtain the same result as our other MC script -from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS - -LEARNING_RATE = 0.001 - -if __name__ == '__main__': - # use the standard grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # found by policy_iteration_random on standard_grid - # MC method won't get exactly this, but should be close - # values: - # --------------------------- - # 0.43| 0.56| 0.72| 0.00| - # --------------------------- - # 0.33| 0.00| 0.21| 0.00| - # --------------------------- - # 0.25| 0.18| 0.11| -0.17| - # policy: - # --------------------------- - # R | R | R | | - # --------------------------- - # U | | U | | - # --------------------------- - # U | L | U | L | - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'U', - (2, 1): 'L', - (2, 2): 'U', - (2, 3): 'L', - } - - # initialize theta - # our model is V_hat = theta.dot(x) - # where x = [row, col, row*col, 1] - 1 for bias term - theta = np.random.randn(4) / 2 - def s2x(s): - return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1]) - - # repeat until convergence - deltas = [] - t = 1.0 - for it in range(20000): - if it % 100 == 0: - t += 0.01 - alpha = LEARNING_RATE/t - # generate an episode using pi - biggest_change = 0 - states_and_returns = play_game(grid, policy) - seen_states = set() - for s, G in states_and_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - if s not in seen_states: - old_theta = theta.copy() - x = s2x(s) - V_hat = theta.dot(x) - # grad(V_hat) wrt theta = x - theta += alpha*(G - V_hat)*x - biggest_change = max(biggest_change, np.abs(old_theta - theta).sum()) - seen_states.add(s) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # obtain predicted values - V = {} - states = grid.all_states() - for s in states: - if s in grid.actions: - V[s] = theta.dot(s2x(s)) - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/approx_q_learning.py b/rl/approx_q_learning.py deleted file mode 100644 index c3c1a35d..00000000 
--- a/rl/approx_q_learning.py +++ /dev/null @@ -1,190 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy -from monte_carlo_es import max_dict -from sarsa import random_action, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS - -SA2IDX = {} -IDX = 0 - -class Model: - def __init__(self): - self.theta = np.random.randn(25) / np.sqrt(25) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # print "D:", IDX - # self.theta = np.random.randn(IDX) / np.sqrt(IDX) - - def sa2x(self, s, a): - # NOTE: using just (r, c, r*c, u, d, l, r, 1) is not expressive enough - return np.array([ - s[0] - 1 if a == 'U' else 0, - s[1] - 1.5 if a == 'U' else 0, - (s[0]*s[1] - 3)/3 if a == 'U' else 0, - (s[0]*s[0] - 2)/2 if a == 'U' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'U' else 0, - 1 if a == 'U' else 0, - s[0] - 1 if a == 'D' else 0, - s[1] - 1.5 if a == 'D' else 0, - (s[0]*s[1] - 3)/3 if a == 'D' else 0, - (s[0]*s[0] - 2)/2 if a == 'D' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'D' else 0, - 1 if a == 'D' else 0, - s[0] - 1 if a == 'L' else 0, - s[1] - 1.5 if a == 'L' else 0, - (s[0]*s[1] - 3)/3 if a == 'L' else 0, - (s[0]*s[0] - 2)/2 if a == 'L' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'L' else 0, - 1 if a == 'L' else 0, - s[0] - 1 if a == 'R' else 0, - s[1] - 1.5 if a == 'R' else 0, - (s[0]*s[1] - 3)/3 if a == 'R' else 0, - (s[0]*s[0] - 2)/2 if a == 'R' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'R' else 0, - 1 if a == 'R' else 0, - 1 - ]) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # x = np.zeros(len(self.theta)) - # idx = SA2IDX[s][a] - # x[idx] = 1 - # return x - - def predict(self, s, a): - x = self.sa2x(s, a) - return self.theta.dot(x) - - def grad(self, s, a): - return self.sa2x(s, a) - - -def getQs(model, s): - # we need Q(s,a) to choose an action - # i.e. a = argmax[a]{ Q(s,a) } - Qs = {} - for a in ALL_POSSIBLE_ACTIONS: - q_sa = model.predict(s, a) - Qs[a] = q_sa - return Qs - - -if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. 
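# The hand-crafted sa2x feature models removed in this patch are superseded by the
# RBFSampler-based approximators added in the previous commit (approx_control.py,
# approx_prediction.py, cartpole.py). A minimal sketch of that replacement for one
# observed transition (s, a, r, s2) on the grid, reusing names defined in
# approx_control.py (gather_samples, merge_state_action, ALL_POSSIBLE_ACTIONS,
# ALPHA, GAMMA); the variables featurizer, w, x and target are illustrative only:
import numpy as np
from sklearn.kernel_approximation import RBFSampler

featurizer = RBFSampler()                      # random Fourier features approximating an RBF kernel
featurizer.fit(gather_samples(grid))           # fit on (state, one-hot action) vectors from random rollouts
w = np.zeros(featurizer.n_components)          # linear model: Q(s, a) ~ w @ x

x = featurizer.transform([merge_state_action(s, a)])[0]
q_s2 = [w @ featurizer.transform([merge_state_action(s2, a2)])[0]
        for a2 in ALL_POSSIBLE_ACTIONS]
target = r + GAMMA * max(q_s2)                 # Q-learning style bootstrapped target
w += ALPHA * (target - w @ x) * x              # semi-gradient step: grad of w @ x w.r.t. w is x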
- # - # grid = standard_grid() - grid = negative_grid(step_cost=-0.1) - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # no policy initialization, we will derive our policy from most recent Q - # enumerate all (s,a) pairs, each will have its own weight in our "dumb" model - # essentially each weight will be a measure of Q(s,a) itself - states = grid.all_states() - for s in states: - SA2IDX[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - SA2IDX[s][a] = IDX - IDX += 1 - - # initialize model - model = Model() - - # repeat until convergence - t = 1.0 - t2 = 1.0 - deltas = [] - for it in range(20000): - if it % 100 == 0: - t += 0.01 - t2 += 0.01 - if it % 1000 == 0: - print("it:", it) - alpha = ALPHA / t2 - - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # get Q(s) so we can choose the first action - Qs = getQs(model, s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. - a = max_dict(Qs)[0] - a = random_action(a, eps=0.5/t) # epsilon-greedy - biggest_change = 0 - while not grid.game_over(): - r = grid.move(a) - s2 = grid.current_state() - - # we need the next action as well since Q(s,a) depends on Q(s',a') - # if s2 not in policy then it's a terminal state, all Q are 0 - old_theta = model.theta.copy() - if grid.is_terminal(s2): - model.theta += alpha*(r - model.predict(s, a))*model.grad(s, a) - else: - # not terminal - Qs2 = getQs(model, s2) - a2, maxQs2a2 = max_dict(Qs2) - a2 = random_action(a2, eps=0.5/t) # epsilon-greedy - - # we will update Q(s,a) AS we experience the episode - model.theta += alpha*(r + GAMMA*maxQs2a2 - model.predict(s, a))*model.grad(s, a) - - # next state becomes current state - s = s2 - a = a2 - - biggest_change = max(biggest_change, np.abs(model.theta - old_theta).sum()) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # determine the policy from Q* - # find V* from Q* - policy = {} - V = {} - Q = {} - for s in grid.actions.keys(): - Qs = getQs(model, s) - Q[s] = Qs - a, max_q = max_dict(Qs) - policy[s] = a - V[s] = max_q - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/approx_semigradient_sarsa_control.py b/rl/approx_semigradient_sarsa_control.py deleted file mode 100644 index c7ce0a79..00000000 --- a/rl/approx_semigradient_sarsa_control.py +++ /dev/null @@ -1,190 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy -from monte_carlo_es import max_dict -from sarsa import random_action, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS - -SA2IDX = {} -IDX = 0 - -class Model: - def __init__(self): - self.theta = np.random.randn(25) / np.sqrt(25) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # 
print "D:", IDX - # self.theta = np.random.randn(IDX) / np.sqrt(IDX) - - def sa2x(self, s, a): - # NOTE: using just (r, c, r*c, u, d, l, r, 1) is not expressive enough - return np.array([ - s[0] - 1 if a == 'U' else 0, - s[1] - 1.5 if a == 'U' else 0, - (s[0]*s[1] - 3)/3 if a == 'U' else 0, - (s[0]*s[0] - 2)/2 if a == 'U' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'U' else 0, - 1 if a == 'U' else 0, - s[0] - 1 if a == 'D' else 0, - s[1] - 1.5 if a == 'D' else 0, - (s[0]*s[1] - 3)/3 if a == 'D' else 0, - (s[0]*s[0] - 2)/2 if a == 'D' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'D' else 0, - 1 if a == 'D' else 0, - s[0] - 1 if a == 'L' else 0, - s[1] - 1.5 if a == 'L' else 0, - (s[0]*s[1] - 3)/3 if a == 'L' else 0, - (s[0]*s[0] - 2)/2 if a == 'L' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'L' else 0, - 1 if a == 'L' else 0, - s[0] - 1 if a == 'R' else 0, - s[1] - 1.5 if a == 'R' else 0, - (s[0]*s[1] - 3)/3 if a == 'R' else 0, - (s[0]*s[0] - 2)/2 if a == 'R' else 0, - (s[1]*s[1] - 4.5)/4.5 if a == 'R' else 0, - 1 if a == 'R' else 0, - 1 - ]) - # if we use SA2IDX, a one-hot encoding for every (s,a) pair - # in reality we wouldn't want to do this b/c we have just - # as many params as before - # x = np.zeros(len(self.theta)) - # idx = SA2IDX[s][a] - # x[idx] = 1 - # return x - - def predict(self, s, a): - x = self.sa2x(s, a) - return self.theta.dot(x) - - def grad(self, s, a): - return self.sa2x(s, a) - - -def getQs(model, s): - # we need Q(s,a) to choose an action - # i.e. a = argmax[a]{ Q(s,a) } - Qs = {} - for a in ALL_POSSIBLE_ACTIONS: - q_sa = model.predict(s, a) - Qs[a] = q_sa - return Qs - - -if __name__ == '__main__': - # NOTE: if we use the standard grid, there's a good chance we will end up with - # suboptimal policies - # e.g. - # --------------------------- - # R | R | R | | - # --------------------------- - # R* | | U | | - # --------------------------- - # U | R | U | L | - # since going R at (1,0) (shown with a *) incurs no cost, it's OK to keep doing that. - # we'll either end up staying in the same spot, or back to the start (2,0), at which - # point we whould then just go back up, or at (0,0), at which point we can continue - # on right. - # instead, let's penalize each movement so the agent will find a shorter route. - # - # grid = standard_grid() - grid = negative_grid(step_cost=-0.1) - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # no policy initialization, we will derive our policy from most recent Q - # enumerate all (s,a) pairs, each will have its own weight in our "dumb" model - # essentially each weight will be a measure of Q(s,a) itself - states = grid.all_states() - for s in states: - SA2IDX[s] = {} - for a in ALL_POSSIBLE_ACTIONS: - SA2IDX[s][a] = IDX - IDX += 1 - - # initialize model - model = Model() - - # repeat until convergence - t = 1.0 - t2 = 1.0 - deltas = [] - for it in range(20000): - if it % 100 == 0: - t += 0.01 - t2 += 0.01 - if it % 1000 == 0: - print("it:", it) - alpha = ALPHA / t2 - - # instead of 'generating' an epsiode, we will PLAY - # an episode within this loop - s = (2, 0) # start state - grid.set_state(s) - - # get Q(s) so we can choose the first action - Qs = getQs(model, s) - - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
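Action selection in both of these control scripts is epsilon-greedy with respect to the approximate Q values: greedy on model.predict(s, a), with some probability of a random action that decays over time. A self-contained sketch of that choice, assuming an object with the same predict interface as the Model class above (the repo's random_action helper splits the probability mass slightly differently), is:

import numpy as np

ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')

def epsilon_greedy(model, s, eps=0.1):
    # with probability eps pick a random action, otherwise act greedily
    # with respect to the linear approximation of Q(s, a)
    if np.random.random() < eps:
        return np.random.choice(ALL_POSSIBLE_ACTIONS)
    values = [model.predict(s, a) for a in ALL_POSSIBLE_ACTIONS]
    return ALL_POSSIBLE_ACTIONS[int(np.argmax(values))]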
- a = max_dict(Qs)[0] - a = random_action(a, eps=0.5/t) # epsilon-greedy - biggest_change = 0 - while not grid.game_over(): - r = grid.move(a) - s2 = grid.current_state() - - # we need the next action as well since Q(s,a) depends on Q(s',a') - # if s2 not in policy then it's a terminal state, all Q are 0 - old_theta = model.theta.copy() - if grid.is_terminal(s2): - model.theta += alpha*(r - model.predict(s, a))*model.grad(s, a) - else: - # not terminal - Qs2 = getQs(model, s2) - a2 = max_dict(Qs2)[0] - a2 = random_action(a2, eps=0.5/t) # epsilon-greedy - - # we will update Q(s,a) AS we experience the episode - model.theta += alpha*(r + GAMMA*model.predict(s2, a2) - model.predict(s, a))*model.grad(s, a) - - # next state becomes current state - s = s2 - a = a2 - - biggest_change = max(biggest_change, np.abs(model.theta - old_theta).sum()) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # determine the policy from Q* - # find V* from Q* - policy = {} - V = {} - Q = {} - for s in grid.actions.keys(): - Qs = getQs(model, s) - Q[s] = Qs - a, max_q = max_dict(Qs) - policy[s] = a - V[s] = max_q - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) diff --git a/rl/approx_semigradient_td0_prediction.py b/rl/approx_semigradient_td0_prediction.py deleted file mode 100644 index ea9430be..00000000 --- a/rl/approx_semigradient_td0_prediction.py +++ /dev/null @@ -1,101 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -import matplotlib.pyplot as plt -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy -from td0_prediction import play_game, SMALL_ENOUGH, GAMMA, ALPHA, ALL_POSSIBLE_ACTIONS - -# NOTE: this is only policy evaluation, not optimization - -class Model: - def __init__(self): - self.theta = np.random.randn(4) / 2 - - def s2x(self, s): - return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1]) - - def predict(self, s): - x = self.s2x(s) - return self.theta.dot(x) - - def grad(self, s): - return self.s2x(s) - - -if __name__ == '__main__': - # use the standard grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'R', - (2, 1): 'R', - (2, 2): 'R', - (2, 3): 'U', - } - - model = Model() - deltas = [] - - # repeat until convergence - k = 1.0 - for it in range(20000): - if it % 10 == 0: - k += 0.01 - alpha = ALPHA/k - biggest_change = 0 - - # generate an episode using pi - states_and_rewards = play_game(grid, policy) - # the first (s, r) tuple is the state we start in and 0 - # (since we don't get a reward) for simply starting the game - # the last (s, r) tuple is the terminal state and the final reward - # the value for the terminal state is by definition 0, so we don't - # care about updating it. 
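The two control scripts removed above, approx_q_learning.py and approx_semigradient_sarsa_control.py, share the same linear model and differ only in the bootstrap target used inside the episode loop: Q-learning uses r + GAMMA * max over a' of Q_hat(s', a'), while SARSA uses r + GAMMA * Q_hat(s', a') for the a' actually taken. In both cases only Q_hat(s, a) is differentiated and the target is treated as a constant, which is why the update is called semi-gradient. A condensed sketch of that shared update, assuming the Model interface above:

def semigradient_update(model, s, a, target, alpha):
    # theta <- theta + alpha * (target - Q_hat(s, a)) * dQ_hat/dtheta
    # the target (Q-learning or SARSA flavoured) is not differentiated
    model.theta += alpha * (target - model.predict(s, a)) * model.grad(s, a)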
- for t in range(len(states_and_rewards) - 1): - s, _ = states_and_rewards[t] - s2, r = states_and_rewards[t+1] - # we will update V(s) AS we experience the episode - old_theta = model.theta.copy() - if grid.is_terminal(s2): - target = r - else: - target = r + GAMMA*model.predict(s2) - model.theta += alpha*(target - model.predict(s))*model.grad(s) - biggest_change = max(biggest_change, np.abs(old_theta - model.theta).sum()) - deltas.append(biggest_change) - - plt.plot(deltas) - plt.show() - - # obtain predicted values - V = {} - states = grid.all_states() - for s in states: - if s in grid.actions: - V[s] = model.predict(s) - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) From 9f28a37f020d70f183ffcddcf1a4cdcca8b3ddc6 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 15 May 2021 14:51:58 -0400 Subject: [PATCH 245/329] update --- rl/iterative_policy_evaluation.py | 120 --------------------------- rl/monte_carlo_random.py | 131 ------------------------------ 2 files changed, 251 deletions(-) delete mode 100644 rl/iterative_policy_evaluation.py delete mode 100644 rl/monte_carlo_random.py diff --git a/rl/iterative_policy_evaluation.py b/rl/iterative_policy_evaluation.py deleted file mode 100644 index fea8f438..00000000 --- a/rl/iterative_policy_evaluation.py +++ /dev/null @@ -1,120 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid - -SMALL_ENOUGH = 1e-3 # threshold for convergence - -def print_values(V, g): - for i in range(g.rows): - print("---------------------------") - for j in range(g.cols): - v = V.get((i,j), 0) - if v >= 0: - print(" %.2f|" % v, end="") - else: - print("%.2f|" % v, end="") # -ve sign takes up an extra space - print("") - - -def print_policy(P, g): - for i in range(g.rows): - print("---------------------------") - for j in range(g.cols): - a = P.get((i,j), ' ') - print(" %s |" % a, end="") - print("") - -if __name__ == '__main__': - # iterative policy evaluation - # given a policy, let's find it's value function V(s) - # we will do this for both a uniform random policy and fixed policy - # NOTE: - # there are 2 sources of randomness - # p(a|s) - deciding what action to take given the state - # p(s',r|s,a) - the next state and reward given your action-state pair - # we are only modeling p(a|s) = uniform - # how would the code change if p(s',r|s,a) is not deterministic? 
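As for the question in that last comment: if p(s', r | s, a) were not deterministic, the update further down could no longer call grid.move(a) once and use the single outcome; it would have to average over all possible next states and rewards. A sketch of the modified inner update, assuming a hypothetical interface that exposes the transition probabilities (the grid_world class in this repo does not):

def evaluate_state(s, V, actions, transition_probs, gamma=1.0):
    # actions[s]: actions available in state s
    # transition_probs[(s, a)]: list of (probability, next_state, reward)
    # triples; this interface is hypothetical, for illustration only
    new_v = 0.0
    p_a = 1.0 / len(actions[s])  # uniform random policy, as above
    for a in actions[s]:
        for p_s2, s2, r in transition_probs[(s, a)]:
            new_v += p_a * p_s2 * (r + gamma * V[s2])
    return new_v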
- grid = standard_grid() - - # states will be positions (i,j) - # simpler than tic-tac-toe because we only have one "game piece" - # that can only be at one position at a time - states = grid.all_states() - - ### uniformly random actions ### - # initialize V(s) = 0 - V = {} - for s in states: - V[s] = 0 - gamma = 1.0 # discount factor - # repeat until convergence - while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in grid.actions: - - new_v = 0 # we will accumulate the answer - p_a = 1.0 / len(grid.actions[s]) # each action has equal probability - for a in grid.actions[s]: - grid.set_state(s) - r = grid.move(a) - new_v += p_a * (r + gamma * V[grid.current_state()]) - V[s] = new_v - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - print("values for uniformly random actions:") - print_values(V, grid) - print("\n\n") - - ### fixed policy ### - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'R', - (2, 1): 'R', - (2, 2): 'R', - (2, 3): 'U', - } - print_policy(policy, grid) - - # initialize V(s) = 0 - V = {} - for s in states: - V[s] = 0 - - # let's see how V(s) changes as we get further away from the reward - gamma = 0.9 # discount factor - - # repeat until convergence - while True: - biggest_change = 0 - for s in states: - old_v = V[s] - - # V(s) only has value if it's not a terminal state - if s in policy: - a = policy[s] - grid.set_state(s) - r = grid.move(a) - V[s] = r + gamma * V[grid.current_state()] - biggest_change = max(biggest_change, np.abs(old_v - V[s])) - - if biggest_change < SMALL_ENOUGH: - break - print("values for fixed policy:") - print_values(V, grid) diff --git a/rl/monte_carlo_random.py b/rl/monte_carlo_random.py deleted file mode 100644 index 9498b2e1..00000000 --- a/rl/monte_carlo_random.py +++ /dev/null @@ -1,131 +0,0 @@ -# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python -# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python -from __future__ import print_function, division -from builtins import range -# Note: you may need to update your version of future -# sudo pip install -U future - - -import numpy as np -from grid_world import standard_grid, negative_grid -from iterative_policy_evaluation import print_values, print_policy - -SMALL_ENOUGH = 1e-3 -GAMMA = 0.9 -ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R') - -# NOTE: this is only policy evaluation, not optimization - -def random_action(a): - # choose given a with probability 0.5 - # choose some other a' != a with probability 0.5/3 - p = np.random.random() - if p < 0.5: - return a - else: - tmp = list(ALL_POSSIBLE_ACTIONS) - tmp.remove(a) - return np.random.choice(tmp) - -def play_game(grid, policy): - # returns a list of states and corresponding returns - - # reset game to start at a random position - # we need to do this, because given our current deterministic policy - # we would never end up at certain states, but we still want to measure their value - start_states = list(grid.actions.keys()) - start_idx = np.random.choice(len(start_states)) - grid.set_state(start_states[start_idx]) - - s = grid.current_state() - states_and_rewards = [(s, 0)] # list of tuples of (state, reward) - while not grid.game_over(): - a = policy[s] - a = random_action(a) - r = grid.move(a) - s = grid.current_state() - states_and_rewards.append((s, r)) - # calculate the returns by working 
backwards from the terminal state - G = 0 - states_and_returns = [] - first = True - for s, r in reversed(states_and_rewards): - # the value of the terminal state is 0 by definition - # we should ignore the first state we encounter - # and ignore the last G, which is meaningless since it doesn't correspond to any move - if first: - first = False - else: - states_and_returns.append((s, G)) - G = r + GAMMA*G - states_and_returns.reverse() # we want it to be in order of state visited - return states_and_returns - - -if __name__ == '__main__': - # use the standard grid again (0 for every step) so that we can compare - # to iterative policy evaluation - grid = standard_grid() - - # print rewards - print("rewards:") - print_values(grid.rewards, grid) - - # state -> action - # found by policy_iteration_random on standard_grid - # MC method won't get exactly this, but should be close - # values: - # --------------------------- - # 0.43| 0.56| 0.72| 0.00| - # --------------------------- - # 0.33| 0.00| 0.21| 0.00| - # --------------------------- - # 0.25| 0.18| 0.11| -0.17| - # policy: - # --------------------------- - # R | R | R | | - # --------------------------- - # U | | U | | - # --------------------------- - # U | L | U | L | - policy = { - (2, 0): 'U', - (1, 0): 'U', - (0, 0): 'R', - (0, 1): 'R', - (0, 2): 'R', - (1, 2): 'U', - (2, 1): 'L', - (2, 2): 'U', - (2, 3): 'L', - } - - # initialize V(s) and returns - V = {} - returns = {} # dictionary of state -> list of returns we've received - states = grid.all_states() - for s in states: - if s in grid.actions: - returns[s] = [] - else: - # terminal state or state we can't otherwise get to - V[s] = 0 - - # repeat until convergence - for t in range(5000): - - # generate an episode using pi - states_and_returns = play_game(grid, policy) - seen_states = set() - for s, G in states_and_returns: - # check if we have already seen s - # called "first-visit" MC policy evaluation - if s not in seen_states: - returns[s].append(G) - V[s] = np.mean(returns[s]) - seen_states.add(s) - - print("values:") - print_values(V, grid) - print("policy:") - print_policy(policy, grid) From 471317677eb2fdba48f471637d7624fcd58c3224 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 15 May 2021 18:30:58 -0400 Subject: [PATCH 246/329] update --- cnn_class/exercises.txt | 20 ++++++++++++++++++++ rnn_class/exercises.txt | 19 +++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 cnn_class/exercises.txt create mode 100644 rnn_class/exercises.txt diff --git a/cnn_class/exercises.txt b/cnn_class/exercises.txt new file mode 100644 index 00000000..4fdbc856 --- /dev/null +++ b/cnn_class/exercises.txt @@ -0,0 +1,20 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +CNN +https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge +https://lazyprogrammer.me/course_files/fer2013.csv + +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv \ No newline at end of file diff --git a/rnn_class/exercises.txt b/rnn_class/exercises.txt 
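The first-visit MC evaluation in the deleted monte_carlo_random.py stores every return in returns[s] and recomputes np.mean on each visit. An equivalent, memory-light variant keeps only a visit count and a running mean per state; the sketch below uses illustrative names:

from collections import defaultdict

V = defaultdict(float)   # running mean of returns per state
N = defaultdict(int)     # number of first visits per state

def update_value(s, G):
    # incremental mean: converges to the same value as np.mean(returns[s])
    N[s] += 1
    V[s] += (G - V[s]) / N[s]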
new file mode 100644 index 00000000..612eea3e --- /dev/null +++ b/rnn_class/exercises.txt @@ -0,0 +1,19 @@ +Logistic Regression +https://www.kaggle.com/uciml/pima-indians-diabetes-database +https://lazyprogrammer.me/course_files/exercises/diabetes.csv + +Linear Regression +https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html +https://lazyprogrammer.me/course_files/exercises/boston.txt + +ANN +https://archive.ics.uci.edu/ml/datasets/ecoli (orig) +https://www.kaggle.com/elikplim/ecoli-data-set (alt) +https://lazyprogrammer.me/course_files/exercises/ecoli.csv + +RNN +Find your own stock price dataset! + +NLP +https://www.kaggle.com/crowdflower/twitter-airline-sentiment +https://lazyprogrammer.me/course_files/exercises/AirlineSentimentTweets.csv \ No newline at end of file From db0bead4c6366d6291ff203bc306d2d83c09d31d Mon Sep 17 00:00:00 2001 From: User Date: Wed, 2 Jun 2021 15:03:20 -0400 Subject: [PATCH 247/329] update --- pytorch/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pytorch/extra_reading.txt b/pytorch/extra_reading.txt index 7d5afcf1..e7bdc2ea 100644 --- a/pytorch/extra_reading.txt +++ b/pytorch/extra_reading.txt @@ -11,6 +11,9 @@ https://arxiv.org/abs/1502.03167 Dropout: A Simple Way to Prevent Neural Networks from Overfitting https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf +Implementing Dropout +https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ + Convolution arithmetic tutorial http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html From 3000b13b64d676d7432a6231f78036188ce6d0d5 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 11 Jun 2021 20:35:10 -0400 Subject: [PATCH 248/329] time series --- timeseries/extra_reading.txt | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 timeseries/extra_reading.txt diff --git a/timeseries/extra_reading.txt b/timeseries/extra_reading.txt new file mode 100644 index 00000000..577c5727 --- /dev/null +++ b/timeseries/extra_reading.txt @@ -0,0 +1,24 @@ +Estimating Box-Cox power transformation parameter via goodness of fit tests +https://arxiv.org/pdf/1401.3812.pdf + +Linear Regression +https://deeplearningcourses.com/c/data-science-linear-regression-in-python/ + +Logistic Regression +https://deeplearningcourses.com/c/data-science-logistic-regression-in-python/ + +Support Vector Machines +https://deeplearningcourses.com/c/support-vector-machines-in-python + +Random Forests +https://deeplearningcourses.com/c/machine-learning-in-python-random-forest-adaboost + +Deep Learning and Tensorflow 2 +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 + +Gaussian Processes for Regression and Classification +https://www.cs.toronto.edu/~radford/ftp/val6gp.pdf + +How Does Backpropagation Work? 
+https://deeplearningcourses.com/c/data-science-deep-learning-in-python/ +https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ \ No newline at end of file From db00ac84ed36549550b62691b592742e516117c4 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 18 Jun 2021 20:46:42 -0400 Subject: [PATCH 249/329] update --- timeseries/extra_reading.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/timeseries/extra_reading.txt b/timeseries/extra_reading.txt index 577c5727..901fee94 100644 --- a/timeseries/extra_reading.txt +++ b/timeseries/extra_reading.txt @@ -19,6 +19,6 @@ https://deeplearningcourses.com/c/deep-learning-tensorflow-2 Gaussian Processes for Regression and Classification https://www.cs.toronto.edu/~radford/ftp/val6gp.pdf -How Does Backpropagation Work? +How Does Backpropagation Work? (In-Depth) https://deeplearningcourses.com/c/data-science-deep-learning-in-python/ https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ \ No newline at end of file From a5087bca3d3c2e0cbe95cae30307c900a19302a0 Mon Sep 17 00:00:00 2001 From: User Date: Sun, 20 Jun 2021 13:56:05 -0400 Subject: [PATCH 250/329] update --- hmm_class/hmmc_theano.py | 3 +-- hmm_class/hmmc_theano2.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/hmm_class/hmmc_theano.py b/hmm_class/hmmc_theano.py index a68787cb..57d96ff1 100644 --- a/hmm_class/hmmc_theano.py +++ b/hmm_class/hmmc_theano.py @@ -28,8 +28,7 @@ def __init__(self, M, K): self.K = K # number of Gaussians def fit(self, X, learning_rate=1e-2, max_iter=10): - # train the HMM model using the Baum-Welch algorithm - # a specific instance of the expectation-maximization algorithm + # train the HMM model using gradient descent N = len(X) D = X[0].shape[1] # assume each x is organized (T, D) diff --git a/hmm_class/hmmc_theano2.py b/hmm_class/hmmc_theano2.py index 865e89b0..647c8173 100644 --- a/hmm_class/hmmc_theano2.py +++ b/hmm_class/hmmc_theano2.py @@ -30,8 +30,7 @@ def __init__(self, M, K): self.K = K # number of Gaussians def fit(self, X, learning_rate=1e-2, max_iter=10): - # train the HMM model using the Baum-Welch algorithm - # a specific instance of the expectation-maximization algorithm + # train the HMM model using gradient descent N = len(X) D = X[0].shape[1] # assume each x is organized (T, D) From 42420d5e4cde3429e9baca92c5390fce70ae3e74 Mon Sep 17 00:00:00 2001 From: User Date: Sun, 27 Jun 2021 17:16:52 -0400 Subject: [PATCH 251/329] update --- rl/monte_carlo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rl/monte_carlo.py b/rl/monte_carlo.py index 9cfb9bde..aedf786f 100644 --- a/rl/monte_carlo.py +++ b/rl/monte_carlo.py @@ -89,7 +89,7 @@ def play_game(grid, policy, max_steps=20): V[s] = 0 # repeat - for t in range(100): + for _ in range(100): # generate an episode using pi states, rewards = play_game(grid, policy) G = 0 From 40c6621203e3fc452074f1f436f545fa2b8d9b32 Mon Sep 17 00:00:00 2001 From: User Date: Thu, 29 Jul 2021 02:04:01 -0400 Subject: [PATCH 252/329] update --- timeseries/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/timeseries/extra_reading.txt b/timeseries/extra_reading.txt index 901fee94..030c4f3f 100644 --- a/timeseries/extra_reading.txt +++ b/timeseries/extra_reading.txt @@ -21,4 +21,7 @@ https://www.cs.toronto.edu/~radford/ftp/val6gp.pdf How Does Backpropagation Work? 
(In-Depth) https://deeplearningcourses.com/c/data-science-deep-learning-in-python/ -https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ \ No newline at end of file +https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ + +Forecasting at Scale (Facebook Prophet) +https://peerj.com/preprints/3190.pdf \ No newline at end of file From e11f05cb76c4b9d3d2d8f02aee783d207f40c28a Mon Sep 17 00:00:00 2001 From: User Date: Mon, 9 Aug 2021 03:04:15 -0400 Subject: [PATCH 253/329] update --- timeseries/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/timeseries/extra_reading.txt b/timeseries/extra_reading.txt index 030c4f3f..5e20b991 100644 --- a/timeseries/extra_reading.txt +++ b/timeseries/extra_reading.txt @@ -24,4 +24,7 @@ https://deeplearningcourses.com/c/data-science-deep-learning-in-python/ https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ Forecasting at Scale (Facebook Prophet) -https://peerj.com/preprints/3190.pdf \ No newline at end of file +https://peerj.com/preprints/3190.pdf + +Statistical and Machine Learning forecasting methods: Concerns and ways forward +https://journals.plos.org/plosone/article%3Fid%3D10.1371/journal.pone.0194889 \ No newline at end of file From 698466580af9add2b23bb9de09bd5c688ec23dbe Mon Sep 17 00:00:00 2001 From: User Date: Fri, 1 Oct 2021 14:51:34 -0400 Subject: [PATCH 254/329] update readme --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 116108a3..eb233dfd 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,15 @@ Find associated tutorials at https://lazyprogrammer.me Find associated courses at https://deeplearningcourses.com -Please note that not all code from all courses will be found in this repository. Some newer code examples (e.g. everything from Tensorflow 2.0) were done in Google Colab. Therefore, you should check the instructions given in the lectures for the course you are taking. +Please note that not all code from all courses will be found in this repository. Some newer code examples (e.g. most of Tensorflow 2.0) were done in Google Colab. Therefore, you should check the instructions given in the lectures for the course you are taking. + + +How to I find the code for a particular course? +=============================================== + +The code for each course is separated by folder. You can determine which folder corresponds with which course by watching the "Where to get the code" lecture inside the course (usually Lecture 2 or 3). + +Remember: one folder = one course. Why you should not fork this repo From af7c2c958ab016ca63f616f393664ebb8fbe4ce2 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 1 Oct 2021 14:55:16 -0400 Subject: [PATCH 255/329] update readme --- README.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index eb233dfd..4111ed67 100644 --- a/README.md +++ b/README.md @@ -33,21 +33,24 @@ Beginning with Tensorflow 2, I started to use Google Colab. For those courses, u VIP Course Links =================== -**Financial Engineering and Artificial Intelligence in Python** (special discount link for full VIP version as of Apr 2021) +*** Note: if any of these coupons becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. FINANCEVIP2, FINANCEVIP3, etc.. 
-*** Note: if this coupon becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. FINANCEVIP2, FINANCEVIP3, etc.. +**Time Series Analysis, Forecasting, and Machine Learning** -https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP7 +https://www.udemy.com/course/time-series-analysis/?couponCode=TIMEVIP4 -**PyTorch: Deep Learning and Artificial Intelligence** (special discount link for full VIP course as of Apr 2021) +**Financial Engineering and Artificial Intelligence in Python** -*** Note: if this coupon becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. PYTORCHVIP6, PYTORCHVIP7, etc.. +https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP13 -https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP12 +**PyTorch: Deep Learning and Artificial Intelligence** -**Tensorflow 2.0: Deep Learning and Artificial Intelligence** (VIP Content Only) +https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP18 + + +**Tensorflow 2.0: Deep Learning and Artificial Intelligence** (VIP Version) https://deeplearningcourses.com/c/deep-learning-tensorflow-2 From b80a63fb50f9d08d4bc8cd7fe76f721c3b81ccf2 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 1 Oct 2021 14:57:06 -0400 Subject: [PATCH 256/329] update readme --- README.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4111ed67..261784af 100644 --- a/README.md +++ b/README.md @@ -55,11 +55,24 @@ https://deeplearningcourses.com/c/deep-learning-tensorflow-2 +Deep Learning Courses Exclusives +================================ + +Classical Statistical Inference and A/B Testing in Python +https://deeplearningcourses.com/c/statistical-inference-in-python + +Linear Programming for Linear Regression in Python +https://deeplearningcourses.com/c/linear-programming-python + +MATLAB for Students, Engineers, and Professionals in STEM +https://deeplearningcourses.com/c/matlab + + + Other Course Links ================== -Tensorflow 2.0: Deep Learning and Artificial Intelligence -(Main Course - special discount link) +Tensorflow 2.0: Deep Learning and Artificial Intelligence (non-VIP version) https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 Cutting-Edge AI: Deep Reinforcement Learning in Python From 4e97d48acfb3c758375e35f320fe283cbb376eff Mon Sep 17 00:00:00 2001 From: User Date: Fri, 19 Nov 2021 03:06:36 -0500 Subject: [PATCH 257/329] update --- ann_class2/extra_reading.txt | 5 ++++- tf2.0/extra_reading.txt | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ann_class2/extra_reading.txt b/ann_class2/extra_reading.txt index 096d0a7c..64c17548 100644 --- a/ann_class2/extra_reading.txt +++ b/ann_class2/extra_reading.txt @@ -28,4 +28,7 @@ Advances in optimizing Recurrent Networks by Yoshua Bengio, Section 3.5 http://arxiv.org/pdf/1212.0901v2.pdf Dropout: A Simple Way to Prevent Neural Networks from Overfitting -https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf \ No newline at end of file +https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf + +The Loss Surfaces of Multilayer Networks +https://arxiv.org/pdf/1412.0233.pdf \ No newline at end of file diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt index 7d5afcf1..041ff992 100644 --- a/tf2.0/extra_reading.txt +++ b/tf2.0/extra_reading.txt 
@@ -24,4 +24,7 @@ Practical Deep Reinforcement Learning Approach for Stock Trading https://arxiv.org/abs/1811.07522 Inceptionism: Going Deeper into Neural Networks -https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html \ No newline at end of file +https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html + +The Loss Surfaces of Multilayer Networks +https://arxiv.org/pdf/1412.0233.pdf \ No newline at end of file From 6d35fa0cd9d1422534de9ce603a29623944dc384 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 27 Nov 2021 14:47:23 -0500 Subject: [PATCH 258/329] update --- ann_class2/momentum.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ann_class2/momentum.py b/ann_class2/momentum.py index 4b3890e3..8fb86962 100644 --- a/ann_class2/momentum.py +++ b/ann_class2/momentum.py @@ -56,6 +56,7 @@ def main(): losses_batch = [] errors_batch = [] for i in range(max_iter): + Xtrain, Ytrain, Ytrain_ind = shuffle(Xtrain, Ytrain, Ytrain_ind) for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] @@ -100,6 +101,7 @@ def main(): dW1 = 0 db1 = 0 for i in range(max_iter): + Xtrain, Ytrain, Ytrain_ind = shuffle(Xtrain, Ytrain, Ytrain_ind) for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] @@ -151,6 +153,7 @@ def main(): vW1 = 0 vb1 = 0 for i in range(max_iter): + Xtrain, Ytrain, Ytrain_ind = shuffle(Xtrain, Ytrain, Ytrain_ind) for j in range(n_batches): Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),] Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),] From ae490f61fc3c2101e885e82b64726c639ac227f1 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 3 Dec 2021 16:15:37 -0500 Subject: [PATCH 259/329] update --- nlp_class3/cnn_toxic.py | 1 + nlp_class3/lstm_toxic.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nlp_class3/cnn_toxic.py b/nlp_class3/cnn_toxic.py index 57d86d1e..f0c55604 100644 --- a/nlp_class3/cnn_toxic.py +++ b/nlp_class3/cnn_toxic.py @@ -19,6 +19,7 @@ # Download the data: # https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge +# https://lazyprogrammer.me/course_files/toxic_comment_train.csv # Download the word vectors: # http://nlp.stanford.edu/data/glove.6B.zip diff --git a/nlp_class3/lstm_toxic.py b/nlp_class3/lstm_toxic.py index 71f4947d..113c3c5d 100644 --- a/nlp_class3/lstm_toxic.py +++ b/nlp_class3/lstm_toxic.py @@ -27,6 +27,7 @@ # Download the data: # https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge +# https://lazyprogrammer.me/course_files/toxic_comment_train.csv # Download the word vectors: # http://nlp.stanford.edu/data/glove.6B.zip @@ -115,8 +116,8 @@ # create an LSTM network with a single LSTM input_ = Input(shape=(MAX_SEQUENCE_LENGTH,)) x = embedding_layer(input_) -# x = LSTM(15, return_sequences=True)(x) -x = Bidirectional(LSTM(15, return_sequences=True))(x) +x = LSTM(15, return_sequences=True)(x) +# x = Bidirectional(LSTM(15, return_sequences=True))(x) x = GlobalMaxPool1D()(x) output = Dense(len(possible_labels), activation="sigmoid")(x) From 4a7e6a0f66ee0e14fa43ed96bd98b631e47e8db2 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 22 Dec 2021 01:07:51 -0500 Subject: [PATCH 260/329] update --- nlp_v2/extra_reading.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 nlp_v2/extra_reading.txt diff --git a/nlp_v2/extra_reading.txt b/nlp_v2/extra_reading.txt new file mode 100644 index 
00000000..b76f7ddc --- /dev/null +++ b/nlp_v2/extra_reading.txt @@ -0,0 +1,11 @@ +An information-theoretic perspective of tf–idf measures +https://www.sciencedirect.com/science/article/abs/pii/S0306457302000213 + +A Mathematical Theory of Communication by Claude Shannon +https://people.math.harvard.edu/~ctm/home/text/others/shannon/entropy/entropy.pdf + +Latent Dirichlet Allocation +https://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf + +List of Hugging Face Pipelines for NLP +https://lazyprogrammer.me/list-of-hugging-face-pipelines-for-nlp/ \ No newline at end of file From d9a3bd4f8658b2f81939527595c0528aa3cf6693 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 22 Dec 2021 01:54:55 -0500 Subject: [PATCH 261/329] update --- nlp_v2/extra_reading.txt | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/nlp_v2/extra_reading.txt b/nlp_v2/extra_reading.txt index b76f7ddc..f558808c 100644 --- a/nlp_v2/extra_reading.txt +++ b/nlp_v2/extra_reading.txt @@ -4,8 +4,23 @@ https://www.sciencedirect.com/science/article/abs/pii/S0306457302000213 A Mathematical Theory of Communication by Claude Shannon https://people.math.harvard.edu/~ctm/home/text/others/shannon/entropy/entropy.pdf +Spam Filtering with Naive Bayes – Which Naive Bayes? +http://www2.aueb.gr/users/ion/docs/ceas2006_paper.pdf + +Sentiment analysis using multinomial logistic regression +https://ieeexplore.ieee.org/document/8226700 + Latent Dirichlet Allocation https://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf List of Hugging Face Pipelines for NLP -https://lazyprogrammer.me/list-of-hugging-face-pipelines-for-nlp/ \ No newline at end of file +https://lazyprogrammer.me/list-of-hugging-face-pipelines-for-nlp/ + +Indexing by Latent Semantic Analysis (Latent Semantic Indexing) +http://lsa.colorado.edu/papers/JASIS.lsi.90.pdf + +Efficient Estimation of Word Representations in Vector Space (word2vec) +https://arxiv.org/abs/1301.3781 + +GloVe: Global Vectors for Word Representation (GloVe) +https://nlp.stanford.edu/pubs/glove.pdf \ No newline at end of file From 8c022d46763ab09b4b4e0e32e8960a470093c162 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 27 Dec 2021 20:53:52 -0500 Subject: [PATCH 262/329] update --- nlp_v2/extra_reading.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nlp_v2/extra_reading.txt b/nlp_v2/extra_reading.txt index f558808c..62a145b9 100644 --- a/nlp_v2/extra_reading.txt +++ b/nlp_v2/extra_reading.txt @@ -4,6 +4,12 @@ https://www.sciencedirect.com/science/article/abs/pii/S0306457302000213 A Mathematical Theory of Communication by Claude Shannon https://people.math.harvard.edu/~ctm/home/text/others/shannon/entropy/entropy.pdf +TextRank: Bringing Order into Texts +https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf + +Variations of the Similarity Function of TextRank for Automated Summarization +https://arxiv.org/abs/1602.03606 + Spam Filtering with Naive Bayes – Which Naive Bayes? 
http://www2.aueb.gr/users/ion/docs/ceas2006_paper.pdf From 011cc152a9d3dea01b3486a0b41f798ff5b507e5 Mon Sep 17 00:00:00 2001 From: User Date: Thu, 27 Jan 2022 20:15:29 -0500 Subject: [PATCH 263/329] update --- nlp_v2/extra_reading.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nlp_v2/extra_reading.txt b/nlp_v2/extra_reading.txt index 62a145b9..35ef8d05 100644 --- a/nlp_v2/extra_reading.txt +++ b/nlp_v2/extra_reading.txt @@ -10,6 +10,12 @@ https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf Variations of the Similarity Function of TextRank for Automated Summarization https://arxiv.org/abs/1602.03606 +Generic Text Summarization Using Relevance Measure and Latent Semantic Analysis +https://www.cs.bham.ac.uk/~pxt/IDA/text_summary.pdf + +Using Latent Semantic Analysis in Text Summarization and Summary Evaluation +http://textmining.zcu.cz/publications/isim.pdf + Spam Filtering with Naive Bayes – Which Naive Bayes? http://www2.aueb.gr/users/ion/docs/ceas2006_paper.pdf From ccf000866926ea85636871971603d7bfa4005379 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 12 Feb 2022 00:01:04 -0500 Subject: [PATCH 264/329] update --- nlp_v2/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nlp_v2/extra_reading.txt b/nlp_v2/extra_reading.txt index 35ef8d05..d7d382f9 100644 --- a/nlp_v2/extra_reading.txt +++ b/nlp_v2/extra_reading.txt @@ -35,4 +35,7 @@ Efficient Estimation of Word Representations in Vector Space (word2vec) https://arxiv.org/abs/1301.3781 GloVe: Global Vectors for Word Representation (GloVe) -https://nlp.stanford.edu/pubs/glove.pdf \ No newline at end of file +https://nlp.stanford.edu/pubs/glove.pdf + +Deep Learning with Tensorflow, a bit more in-depth +https://deeplearningcourses.com/c/deep-learning-tensorflow-2 \ No newline at end of file From 04d34f8d85f7e3ee881e0215e3e933de827e39a2 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 11 Mar 2022 15:11:05 -0500 Subject: [PATCH 265/329] update --- rl2/cartpole/save_a_video.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rl2/cartpole/save_a_video.py b/rl2/cartpole/save_a_video.py index 31690c29..ed34c76d 100644 --- a/rl2/cartpole/save_a_video.py +++ b/rl2/cartpole/save_a_video.py @@ -63,5 +63,6 @@ def random_search(env): plt.show() # play a final set of episodes - env = wrappers.Monitor(env, 'my_awesome_dir') + # env = wrappers.Monitor(env, 'my_awesome_dir') + env = wrappers.RecordVideo(env, 'my_awesome_dir') print("***Final run with final weights***:", play_one_episode(env, params)) From b2a08f5e319f2689f48ceeb79241adbe7e6a6981 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 14 Mar 2022 14:41:28 -0400 Subject: [PATCH 266/329] update --- README.md | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 261784af..0a5ce4a7 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,11 @@ VIP Course Links *** Note: if any of these coupons becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. FINANCEVIP2, FINANCEVIP3, etc.. 
+**Machine Learning: Natural Language Processing in Python (V2)** (VIP parts) + +https://deeplearningcourses.com/c/natural-language-processing-in-python + + **Time Series Analysis, Forecasting, and Machine Learning** https://www.udemy.com/course/time-series-analysis/?couponCode=TIMEVIP4 @@ -72,6 +77,9 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== +Machine Learning: Natural Language Processing in Python (V2) +https://bit.ly/3idcaE5 + Tensorflow 2.0: Deep Learning and Artificial Intelligence (non-VIP version) https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 @@ -117,7 +125,7 @@ https://deeplearningcourses.com/c/data-science-linear-regression-in-python Deep Learning Prerequisites: Logistic Regression in Python https://deeplearningcourses.com/c/data-science-logistic-regression-in-python -Deep Learning in Python +Data Science: Deep Learning and Neural Networks in Python https://deeplearningcourses.com/c/data-science-deep-learning-in-python Cluster Analysis and Unsupervised Machine Learning in Python @@ -129,10 +137,10 @@ https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-py Bayesian Machine Learning in Python: A/B Testing https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing -Easy Natural Language Processing in Python +Data Science: Natural Language Processing in Python https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python -Practical Deep Learning in Theano and TensorFlow +Modern Deep Learning in Python https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow Ensemble Machine Learning in Python: Random Forest and AdaBoost From 12ce73013375fda286ee7bfc466abbea644e0a7b Mon Sep 17 00:00:00 2001 From: User Date: Thu, 24 Mar 2022 13:28:59 -0400 Subject: [PATCH 267/329] update --- cnn_class2/siamese.py | 2 +- cnn_class2/use_pretrained_weights_resnet.py | 14 +++++++------- cnn_class2/use_pretrained_weights_vgg.py | 2 +- recommenders/autorec.py | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cnn_class2/siamese.py b/cnn_class2/siamese.py index a8e5894f..4c43f163 100644 --- a/cnn_class2/siamese.py +++ b/cnn_class2/siamese.py @@ -425,7 +425,7 @@ def get_test_accuracy(threshold=0.85): valid_steps = int(np.ceil(len(test_positives) * 2 / batch_size)) # fit the model -r = model.fit_generator( +r = model.fit( train_generator(), steps_per_epoch=train_steps, epochs=20, diff --git a/cnn_class2/use_pretrained_weights_resnet.py b/cnn_class2/use_pretrained_weights_resnet.py index 39bed211..8f3aae71 100644 --- a/cnn_class2/use_pretrained_weights_resnet.py +++ b/cnn_class2/use_pretrained_weights_resnet.py @@ -8,7 +8,7 @@ from keras.layers import Input, Lambda, Dense, Flatten from keras.models import Model -from keras.applications.resnet50 import ResNet50, preprocess_input +from keras.applications.resnet import ResNet50, preprocess_input # from keras.applications.inception_v3 import InceptionV3, preprocess_input from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator @@ -21,21 +21,21 @@ # re-size all the images to this -IMAGE_SIZE = [224, 224] # feel free to change depending on dataset +IMAGE_SIZE = [100, 100] # feel free to change depending on dataset # training config: epochs = 16 batch_size = 32 # https://www.kaggle.com/paultimothymooney/blood-cells -train_path = '../large_files/blood_cell_images/TRAIN' -valid_path = '../large_files/blood_cell_images/TEST' +# train_path = 
'../large_files/blood_cell_images/TRAIN' +# valid_path = '../large_files/blood_cell_images/TEST' # https://www.kaggle.com/moltean/fruits # train_path = '../large_files/fruits-360/Training' # valid_path = '../large_files/fruits-360/Validation' -# train_path = '../large_files/fruits-360-small/Training' -# valid_path = '../large_files/fruits-360-small/Validation' +train_path = '../large_files/fruits-360-small/Training' +valid_path = '../large_files/fruits-360-small/Validation' # useful for getting number of files image_files = glob(train_path + '/*/*.jp*g') @@ -125,7 +125,7 @@ # fit the model -r = model.fit_generator( +r = model.fit( train_generator, validation_data=valid_generator, epochs=epochs, diff --git a/cnn_class2/use_pretrained_weights_vgg.py b/cnn_class2/use_pretrained_weights_vgg.py index 542bcb48..849dd9f6 100644 --- a/cnn_class2/use_pretrained_weights_vgg.py +++ b/cnn_class2/use_pretrained_weights_vgg.py @@ -123,7 +123,7 @@ # fit the model -r = model.fit_generator( +r = model.fit( train_generator, validation_data=valid_generator, epochs=epochs, diff --git a/recommenders/autorec.py b/recommenders/autorec.py index 02ff05b9..fa0bd415 100644 --- a/recommenders/autorec.py +++ b/recommenders/autorec.py @@ -102,7 +102,7 @@ def test_generator(A, M, A_test, M_test): ) -r = model.fit_generator( +r = model.fit( generator(A, mask), validation_data=test_generator(A_copy, mask_copy, A_test_copy, mask_test_copy), epochs=epochs, From bec06fd62d48a2dc3877e3e9a3cbe030cda6d174 Mon Sep 17 00:00:00 2001 From: User Date: Thu, 19 May 2022 16:10:40 -0400 Subject: [PATCH 268/329] update --- pytorch/extra_reading.txt | 2 +- tf2.0/extra_reading.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch/extra_reading.txt b/pytorch/extra_reading.txt index e7bdc2ea..7fccf01f 100644 --- a/pytorch/extra_reading.txt +++ b/pytorch/extra_reading.txt @@ -15,7 +15,7 @@ Implementing Dropout https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow/ Convolution arithmetic tutorial -http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html +https://theano-pymc.readthedocs.io/en/latest/tutorial/conv_arithmetic.html On the Practical Computational Power of Finite Precision RNNs for Language Recognition https://arxiv.org/abs/1805.04908 diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt index 041ff992..d84404a1 100644 --- a/tf2.0/extra_reading.txt +++ b/tf2.0/extra_reading.txt @@ -12,7 +12,7 @@ Dropout: A Simple Way to Prevent Neural Networks from Overfitting https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf Convolution arithmetic tutorial -http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html +https://theano-pymc.readthedocs.io/en/latest/tutorial/conv_arithmetic.html On the Practical Computational Power of Finite Precision RNNs for Language Recognition https://arxiv.org/abs/1805.04908 From eef2a035074367e530bf3bf2067e2211821a2ad4 Mon Sep 17 00:00:00 2001 From: User Date: Sun, 22 May 2022 14:52:36 -0400 Subject: [PATCH 269/329] update --- transformers/extra_reading.txt | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 transformers/extra_reading.txt diff --git a/transformers/extra_reading.txt b/transformers/extra_reading.txt new file mode 100644 index 00000000..718e2963 --- /dev/null +++ b/transformers/extra_reading.txt @@ -0,0 +1,32 @@ +Attention Is All You Need +https://arxiv.org/abs/1706.03762 + +BERT: Pre-training of Deep Bidirectional Transformers for Language 
Understanding +https://arxiv.org/abs/1810.04805v2 + +Improving Language Understanding by Generative Pre-Training (GPT) +https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf + +Improving Language Understanding with Unsupervised Learning +https://openai.com/blog/language-unsupervised/ + +Language Models are Unsupervised Multitask Learners (GPT-2) +https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf + +Better Language Models and Their Implications +https://openai.com/blog/better-language-models/ + +Language Models are Few-Shot Learners (GPT-3) +https://arxiv.org/abs/2005.14165 + +List of Hugging Face Pipelines for NLP +https://lazyprogrammer.me/list-of-hugging-face-pipelines-for-nlp/ + +BitFit: Simple Parameter-efficient Fine-tuning for Transformer-based Masked Language-models +https://arxiv.org/abs/2106.10199 + +Translation Datasets +https://opus.nlpl.eu/KDE4.php + +Layer Normalization +https://arxiv.org/abs/1607.06450 \ No newline at end of file From b46ea2f96d1e8969d25cb3bd94c779ce8bc2e75a Mon Sep 17 00:00:00 2001 From: User Date: Mon, 23 May 2022 22:51:11 -0400 Subject: [PATCH 270/329] update --- financial_engineering/go_here_instead.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 financial_engineering/go_here_instead.txt diff --git a/financial_engineering/go_here_instead.txt b/financial_engineering/go_here_instead.txt new file mode 100644 index 00000000..614b0afd --- /dev/null +++ b/financial_engineering/go_here_instead.txt @@ -0,0 +1 @@ +https://github.com/lazyprogrammer/financial_engineering From 85bb3bf5141792dbbff04f845f21a8e0832fcef1 Mon Sep 17 00:00:00 2001 From: User Date: Thu, 9 Jun 2022 00:03:25 -0400 Subject: [PATCH 271/329] update --- README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0a5ce4a7..13af1863 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,10 @@ Beginning with Tensorflow 2, I started to use Google Colab. For those courses, u VIP Course Links =================== -*** Note: if any of these coupons becomes out of date, check my website (https://lazyprogrammer.me) for the latest version. I will probably just keep incrementing them numerically, e.g. FINANCEVIP2, FINANCEVIP3, etc.. 
+**Data Science: Transformers for Natural Language Processing** + +https://deeplearningcourses.com/c/data-science-transformers-nlp + **Machine Learning: Natural Language Processing in Python (V2)** (VIP parts) @@ -42,20 +45,21 @@ https://deeplearningcourses.com/c/natural-language-processing-in-python **Time Series Analysis, Forecasting, and Machine Learning** -https://www.udemy.com/course/time-series-analysis/?couponCode=TIMEVIP4 +https://deeplearningcourses.com/c/time-series-analysis **Financial Engineering and Artificial Intelligence in Python** -https://www.udemy.com/course/ai-finance/?couponCode=FINANCEVIP13 +https://deeplearningcourses.com/c/ai-finance **PyTorch: Deep Learning and Artificial Intelligence** -https://www.udemy.com/course/pytorch-deep-learning/?couponCode=PYTORCHVIP18 +https://deeplearningcourses.com/c/pytorch-deep-learning **Tensorflow 2.0: Deep Learning and Artificial Intelligence** (VIP Version) + https://deeplearningcourses.com/c/deep-learning-tensorflow-2 From 1e0d9f2f2dcfc4ced80f338ddce92980a03035af Mon Sep 17 00:00:00 2001 From: User Date: Wed, 29 Jun 2022 16:19:39 -0400 Subject: [PATCH 272/329] update --- cnn_class/extra_reading.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cnn_class/extra_reading.txt b/cnn_class/extra_reading.txt index c7cc13b8..ed16dc09 100644 --- a/cnn_class/extra_reading.txt +++ b/cnn_class/extra_reading.txt @@ -5,7 +5,7 @@ ImageNet Classification with Deep Convolutional Neural Networks https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf Convolution arithmetic tutorial -http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html +https://theano-pymc.readthedocs.io/en/latest/tutorial/conv_arithmetic.html Very Deep Convolutional Networks for Large-Scale Visual Recognition http://www.robots.ox.ac.uk/~vgg/research/very_deep/ From ca68e0a8f2ad0908e4058b641097406e1cf965e9 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 25 Jul 2022 00:58:05 -0400 Subject: [PATCH 273/329] update --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 13af1863..5f485e5d 100644 --- a/README.md +++ b/README.md @@ -81,12 +81,6 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== -Machine Learning: Natural Language Processing in Python (V2) -https://bit.ly/3idcaE5 - -Tensorflow 2.0: Deep Learning and Artificial Intelligence (non-VIP version) -https://www.udemy.com/course/deep-learning-tensorflow-2/?referralCode=E10B72D3848AB70FE1B8 - Cutting-Edge AI: Deep Reinforcement Learning in Python https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence From de294a36ee96f4955c256b38286e6ac76a516edc Mon Sep 17 00:00:00 2001 From: User Date: Mon, 25 Jul 2022 00:58:43 -0400 Subject: [PATCH 274/329] update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5f485e5d..4eebe10b 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ VIP Course Links https://deeplearningcourses.com/c/data-science-transformers-nlp -**Machine Learning: Natural Language Processing in Python (V2)** (VIP parts) +**Machine Learning: Natural Language Processing in Python (V2)** https://deeplearningcourses.com/c/natural-language-processing-in-python From 05e0c52e80d115e100e401c2f857355648a1d124 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 3 Aug 2022 15:11:09 -0400 Subject: [PATCH 275/329] update --- ann_logistic_extra/ann_train.py | 6 +++--- 
ann_logistic_extra/logistic_softmax_train.py | 6 +++--- ann_logistic_extra/logistic_train.py | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ann_logistic_extra/ann_train.py b/ann_logistic_extra/ann_train.py index 5c84a7e4..f4492dee 100644 --- a/ann_logistic_extra/ann_train.py +++ b/ann_logistic_extra/ann_train.py @@ -77,7 +77,7 @@ def cross_entropy(T, pY): print("Final train classification_rate:", classification_rate(Ytrain, predict(pYtrain))) print("Final test classification_rate:", classification_rate(Ytest, predict(pYtest))) -legend1, = plt.plot(train_costs, label='train cost') -legend2, = plt.plot(test_costs, label='test cost') -plt.legend([legend1, legend2]) +plt.plot(train_costs, label='train cost') +plt.plot(test_costs, label='test cost') +plt.legend() plt.show() \ No newline at end of file diff --git a/ann_logistic_extra/logistic_softmax_train.py b/ann_logistic_extra/logistic_softmax_train.py index 2bdc114f..4406626b 100644 --- a/ann_logistic_extra/logistic_softmax_train.py +++ b/ann_logistic_extra/logistic_softmax_train.py @@ -70,7 +70,7 @@ def cross_entropy(T, pY): print("Final train classification_rate:", classification_rate(Ytrain, predict(pYtrain))) print("Final test classification_rate:", classification_rate(Ytest, predict(pYtest))) -legend1, = plt.plot(train_costs, label='train cost') -legend2, = plt.plot(test_costs, label='test cost') -plt.legend([legend1, legend2]) +plt.plot(train_costs, label='train cost') +plt.plot(test_costs, label='test cost') +plt.legend() plt.show() \ No newline at end of file diff --git a/ann_logistic_extra/logistic_train.py b/ann_logistic_extra/logistic_train.py index c9a22815..abedd5ba 100644 --- a/ann_logistic_extra/logistic_train.py +++ b/ann_logistic_extra/logistic_train.py @@ -55,9 +55,9 @@ def cross_entropy(T, pY): print("Final train classification_rate:", classification_rate(Ytrain, np.round(pYtrain))) print("Final test classification_rate:", classification_rate(Ytest, np.round(pYtest))) -legend1, = plt.plot(train_costs, label='train cost') -legend2, = plt.plot(test_costs, label='test cost') -plt.legend([legend1, legend2]) +plt.plot(train_costs, label='train cost') +plt.plot(test_costs, label='test cost') +plt.legend() plt.show() From fa46455ebfed155957c54203015c6be689ce9878 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 3 Sep 2022 23:21:59 -0400 Subject: [PATCH 276/329] prophet --- prophet/extra_reading.txt | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 prophet/extra_reading.txt diff --git a/prophet/extra_reading.txt b/prophet/extra_reading.txt new file mode 100644 index 00000000..1e2ea58c --- /dev/null +++ b/prophet/extra_reading.txt @@ -0,0 +1,2 @@ +Forecasting at Scale (Facebook Prophet) +https://peerj.com/preprints/3190.pdf \ No newline at end of file From ac0d8a50caef25f7613c5bc432286db833f07a77 Mon Sep 17 00:00:00 2001 From: User Date: Fri, 16 Sep 2022 19:16:04 -0400 Subject: [PATCH 277/329] update --- cnn_class/WHERE ARE THE NOTEBOOKS.txt | 3 +++ cnn_class2/WHERE ARE THE NOTEBOOKS.txt | 3 +++ nlp_v2/WHERE ARE THE NOTEBOOKS.txt | 3 +++ pytorch/WHERE ARE THE NOTEBOOKS.txt | 3 +++ rnn_class/WHERE ARE THE NOTEBOOKS.txt | 3 +++ tf2.0/WHERE ARE THE NOTEBOOKS.txt | 3 +++ timeseries/WHERE ARE THE NOTEBOOKS.txt | 3 +++ transformers/WHERE ARE THE NOTEBOOKS.txt | 3 +++ 8 files changed, 24 insertions(+) create mode 100644 cnn_class/WHERE ARE THE NOTEBOOKS.txt create mode 100644 cnn_class2/WHERE ARE THE NOTEBOOKS.txt create mode 100644 nlp_v2/WHERE ARE THE NOTEBOOKS.txt create mode 100644 pytorch/WHERE ARE THE 
NOTEBOOKS.txt create mode 100644 rnn_class/WHERE ARE THE NOTEBOOKS.txt create mode 100644 tf2.0/WHERE ARE THE NOTEBOOKS.txt create mode 100644 timeseries/WHERE ARE THE NOTEBOOKS.txt create mode 100644 transformers/WHERE ARE THE NOTEBOOKS.txt diff --git a/cnn_class/WHERE ARE THE NOTEBOOKS.txt b/cnn_class/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/cnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/cnn_class2/WHERE ARE THE NOTEBOOKS.txt b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/nlp_v2/WHERE ARE THE NOTEBOOKS.txt b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/pytorch/WHERE ARE THE NOTEBOOKS.txt b/pytorch/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/pytorch/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/rnn_class/WHERE ARE THE NOTEBOOKS.txt b/rnn_class/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/rnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/tf2.0/WHERE ARE THE NOTEBOOKS.txt b/tf2.0/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/tf2.0/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/timeseries/WHERE ARE THE NOTEBOOKS.txt b/timeseries/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/timeseries/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/transformers/WHERE ARE THE NOTEBOOKS.txt b/transformers/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/transformers/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. 
\ No newline at end of file From 57e38751087a9fcc5ac8bbc8a01fae64fd1a82cf Mon Sep 17 00:00:00 2001 From: User Date: Tue, 18 Oct 2022 00:37:32 -0400 Subject: [PATCH 278/329] update --- rl/cartpole.py | 30 ++++---- rl/cartpole_gym0.19.py | 153 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+), 13 deletions(-) create mode 100644 rl/cartpole_gym0.19.py diff --git a/rl/cartpole.py b/rl/cartpole.py index 2ef157b5..ca1afd85 100644 --- a/rl/cartpole.py +++ b/rl/cartpole.py @@ -29,14 +29,15 @@ def epsilon_greedy(model, s, eps=0.1): def gather_samples(env, n_episodes=10000): samples = [] for _ in range(n_episodes): - s = env.reset() + s, info = env.reset() done = False - while not done: + truncated = False + while not (done or truncated): a = env.action_space.sample() sa = np.concatenate((s, [a])) samples.append(sa) - s, r, done, info = env.step(a) + s, r, done, truncated, info = env.step(a) return samples @@ -70,11 +71,12 @@ def test_agent(model, env, n_episodes=20): reward_per_episode = np.zeros(n_episodes) for it in range(n_episodes): done = False + truncated = False episode_reward = 0 - s = env.reset() - while not done: + s, info = env.reset() + while not (done or truncated): a = epsilon_greedy(model, s, eps=0) - s, r, done, info = env.step(a) + s, r, done, truncated, info = env.step(a) episode_reward += r reward_per_episode[it] = episode_reward return np.mean(reward_per_episode) @@ -82,11 +84,12 @@ def test_agent(model, env, n_episodes=20): def watch_agent(model, env, eps): done = False + truncated = False episode_reward = 0 - s = env.reset() - while not done: + s, info = env.reset() + while not (done or truncated): a = epsilon_greedy(model, s, eps=eps) - s, r, done, info = env.step(a) + s, r, done, truncated, info = env.step(a) env.render() episode_reward += r print("Episode reward:", episode_reward) @@ -94,7 +97,7 @@ def watch_agent(model, env, eps): if __name__ == '__main__': # instantiate environment - env = gym.make("CartPole-v0") + env = gym.make("CartPole-v1", render_mode="rgb_array") model = Model(env) reward_per_episode = [] @@ -105,12 +108,13 @@ def watch_agent(model, env, eps): # repeat until convergence n_episodes = 1500 for it in range(n_episodes): - s = env.reset() + s, info = env.reset() episode_reward = 0 done = False - while not done: + truncated = False + while not (done or truncated): a = epsilon_greedy(model, s) - s2, r, done, info = env.step(a) + s2, r, done, truncated, info = env.step(a) # get the target if done: diff --git a/rl/cartpole_gym0.19.py b/rl/cartpole_gym0.19.py new file mode 100644 index 00000000..2ef157b5 --- /dev/null +++ b/rl/cartpole_gym0.19.py @@ -0,0 +1,153 @@ +# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python +# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python +from __future__ import print_function, division +from builtins import range +# Note: you may need to update your version of future +# sudo pip install -U future + +import gym +import numpy as np +import matplotlib.pyplot as plt +from sklearn.kernel_approximation import RBFSampler + + +GAMMA = 0.99 +ALPHA = 0.1 + + +def epsilon_greedy(model, s, eps=0.1): + # we'll use epsilon-soft to ensure all states are visited + # what happens if you don't do this? i.e. 
eps=0 + p = np.random.random() + if p < (1 - eps): + values = model.predict_all_actions(s) + return np.argmax(values) + else: + return model.env.action_space.sample() + + +def gather_samples(env, n_episodes=10000): + samples = [] + for _ in range(n_episodes): + s = env.reset() + done = False + while not done: + a = env.action_space.sample() + sa = np.concatenate((s, [a])) + samples.append(sa) + + s, r, done, info = env.step(a) + return samples + + +class Model: + def __init__(self, env): + # fit the featurizer to data + self.env = env + samples = gather_samples(env) + self.featurizer = RBFSampler() + self.featurizer.fit(samples) + dims = self.featurizer.n_components + + # initialize linear model weights + self.w = np.zeros(dims) + + def predict(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x @ self.w + + def predict_all_actions(self, s): + return [self.predict(s, a) for a in range(self.env.action_space.n)] + + def grad(self, s, a): + sa = np.concatenate((s, [a])) + x = self.featurizer.transform([sa])[0] + return x + + +def test_agent(model, env, n_episodes=20): + reward_per_episode = np.zeros(n_episodes) + for it in range(n_episodes): + done = False + episode_reward = 0 + s = env.reset() + while not done: + a = epsilon_greedy(model, s, eps=0) + s, r, done, info = env.step(a) + episode_reward += r + reward_per_episode[it] = episode_reward + return np.mean(reward_per_episode) + + +def watch_agent(model, env, eps): + done = False + episode_reward = 0 + s = env.reset() + while not done: + a = epsilon_greedy(model, s, eps=eps) + s, r, done, info = env.step(a) + env.render() + episode_reward += r + print("Episode reward:", episode_reward) + + +if __name__ == '__main__': + # instantiate environment + env = gym.make("CartPole-v0") + + model = Model(env) + reward_per_episode = [] + + # watch untrained agent + watch_agent(model, env, eps=0) + + # repeat until convergence + n_episodes = 1500 + for it in range(n_episodes): + s = env.reset() + episode_reward = 0 + done = False + while not done: + a = epsilon_greedy(model, s) + s2, r, done, info = env.step(a) + + # get the target + if done: + target = r + else: + values = model.predict_all_actions(s2) + target = r + GAMMA * np.max(values) + + # update the model + g = model.grad(s, a) + err = target - model.predict(s, a) + model.w += ALPHA * err * g + + # accumulate reward + episode_reward += r + + # update state + s = s2 + + if (it + 1) % 50 == 0: + print(f"Episode: {it + 1}, Reward: {episode_reward}") + + # early exit + if it > 20 and np.mean(reward_per_episode[-20:]) == 200: + print("Early exit") + break + + reward_per_episode.append(episode_reward) + + # test trained agent + test_reward = test_agent(model, env) + print(f"Average test reward: {test_reward}") + + plt.plot(reward_per_episode) + plt.title("Reward per episode") + plt.show() + + # watch trained agent + watch_agent(model, env, eps=0) + From 191ab2c7140e336002d529bc1eab7cb6d8e91749 Mon Sep 17 00:00:00 2001 From: User Date: Tue, 18 Oct 2022 00:45:29 -0400 Subject: [PATCH 279/329] update --- rl/cartpole.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rl/cartpole.py b/rl/cartpole.py index ca1afd85..abb1b617 100644 --- a/rl/cartpole.py +++ b/rl/cartpole.py @@ -90,7 +90,6 @@ def watch_agent(model, env, eps): while not (done or truncated): a = epsilon_greedy(model, s, eps=eps) s, r, done, truncated, info = env.step(a) - env.render() episode_reward += r print("Episode reward:", episode_reward) @@ -153,5 +152,6 @@ def 
watch_agent(model, env, eps): plt.show() # watch trained agent + env = gym.make("CartPole-v1", render_mode="human") watch_agent(model, env, eps=0) From f9653f6717faa5e6de4e19422ddf4332d159ced8 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 9 Nov 2022 02:59:11 -0500 Subject: [PATCH 280/329] naive bayes --- cnn_class/WHERE ARE THE NOTEBOOKS.txt | 2 +- cnn_class2/WHERE ARE THE NOTEBOOKS.txt | 2 +- naive_bayes/WHERE ARE THE NOTEBOOKS.txt | 3 +++ naive_bayes/extra_reading.txt | 8 ++++++++ nlp_v2/WHERE ARE THE NOTEBOOKS.txt | 2 +- pytorch/WHERE ARE THE NOTEBOOKS.txt | 2 +- tf2.0/WHERE ARE THE NOTEBOOKS.txt | 2 +- timeseries/WHERE ARE THE NOTEBOOKS.txt | 2 +- transformers/WHERE ARE THE NOTEBOOKS.txt | 2 +- 9 files changed, 18 insertions(+), 7 deletions(-) create mode 100644 naive_bayes/WHERE ARE THE NOTEBOOKS.txt create mode 100644 naive_bayes/extra_reading.txt diff --git a/cnn_class/WHERE ARE THE NOTEBOOKS.txt b/cnn_class/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/cnn_class/WHERE ARE THE NOTEBOOKS.txt +++ b/cnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/cnn_class2/WHERE ARE THE NOTEBOOKS.txt b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/cnn_class2/WHERE ARE THE NOTEBOOKS.txt +++ b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/naive_bayes/WHERE ARE THE NOTEBOOKS.txt b/naive_bayes/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..4b0a3f50 --- /dev/null +++ b/naive_bayes/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/naive_bayes/extra_reading.txt b/naive_bayes/extra_reading.txt new file mode 100644 index 00000000..52e5228b --- /dev/null +++ b/naive_bayes/extra_reading.txt @@ -0,0 +1,8 @@ +Complement Naive Bayes +https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf + +Semi-Supervised Learning with Naive Bayes +http://www.kamalnigam.com/papers/emcat-aaai98.pdf + +An empirical study of the naive Bayes classifier +https://faculty.cc.gatech.edu/~isbell/reading/papers/Rish.pdf \ No newline at end of file diff --git a/nlp_v2/WHERE ARE THE NOTEBOOKS.txt b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/nlp_v2/WHERE ARE THE NOTEBOOKS.txt +++ b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. 
\ No newline at end of file diff --git a/pytorch/WHERE ARE THE NOTEBOOKS.txt b/pytorch/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/pytorch/WHERE ARE THE NOTEBOOKS.txt +++ b/pytorch/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/tf2.0/WHERE ARE THE NOTEBOOKS.txt b/tf2.0/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/tf2.0/WHERE ARE THE NOTEBOOKS.txt +++ b/tf2.0/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/timeseries/WHERE ARE THE NOTEBOOKS.txt b/timeseries/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/timeseries/WHERE ARE THE NOTEBOOKS.txt +++ b/timeseries/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/transformers/WHERE ARE THE NOTEBOOKS.txt b/transformers/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..4b0a3f50 100644 --- a/transformers/WHERE ARE THE NOTEBOOKS.txt +++ b/transformers/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,3 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! Please watch it again, and follow the instructions. 
\ No newline at end of file From a090466b65fa1e07ffc24c70c508f74d87312336 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 12 Nov 2022 14:06:06 -0500 Subject: [PATCH 281/329] update --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 4eebe10b..a2e3a8d3 100644 --- a/README.md +++ b/README.md @@ -81,6 +81,9 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== +Data Science & Machine Learning: Naive Bayes in Python +https://deeplearningcourses.com/c/data-science-machine-learning-naive-bayes-in-python + Cutting-Edge AI: Deep Reinforcement Learning in Python https://deeplearningcourses.com/c/cutting-edge-artificial-intelligence From 241bbbacba7da4fde17446fe039200bf8cd9dbe9 Mon Sep 17 00:00:00 2001 From: User Date: Sat, 3 Dec 2022 14:41:18 -0500 Subject: [PATCH 282/329] add bayesian ml links --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index a2e3a8d3..2d2cdd1c 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,12 @@ https://deeplearningcourses.com/c/deep-learning-tensorflow-2 Deep Learning Courses Exclusives ================================ +Data Science: Bayesian Linear Regression in Python +https://deeplearningcourses.com/c/bayesian-linear-regression-in-python + +Data Science: Bayesian Classification in Python +https://deeplearningcourses.com/c/bayesian-classification-in-python + Classical Statistical Inference and A/B Testing in Python https://deeplearningcourses.com/c/statistical-inference-in-python From 0b06cae0032f06f366b537a8162be031cf5d074e Mon Sep 17 00:00:00 2001 From: User Date: Tue, 6 Dec 2022 14:26:08 -0500 Subject: [PATCH 283/329] update --- ann_logistic_extra/ann_train.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ann_logistic_extra/ann_train.py b/ann_logistic_extra/ann_train.py index f4492dee..02c5ac4c 100644 --- a/ann_logistic_extra/ann_train.py +++ b/ann_logistic_extra/ann_train.py @@ -66,11 +66,15 @@ def cross_entropy(T, pY): test_costs.append(ctest) # gradient descent - W2 -= learning_rate*Ztrain.T.dot(pYtrain - Ytrain_ind) - b2 -= learning_rate*(pYtrain - Ytrain_ind).sum(axis=0) - dZ = (pYtrain - Ytrain_ind).dot(W2.T) * (1 - Ztrain*Ztrain) - W1 -= learning_rate*Xtrain.T.dot(dZ) - b1 -= learning_rate*dZ.sum(axis=0) + gW2 = Ztrain.T.dot(pYtrain - Ytrain_ind) + gb2 = (pYtrain - Ytrain_ind).sum(axis=0) + dZ = (pYtrain - Ytrain_ind).dot(W2.T) * (1 - Ztrain * Ztrain) + gW1 = Xtrain.T.dot(dZ) + gb1 = dZ.sum(axis=0) + W2 -= learning_rate * gW2 + b2 -= learning_rate * gb2 + W1 -= learning_rate * gW1 + b1 -= learning_rate * gb1 if i % 1000 == 0: print(i, ctrain, ctest) From 0cf1af080d662d22900a9e0bac22f6052c678621 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 12 Dec 2022 19:40:04 -0500 Subject: [PATCH 284/329] update --- ann_logistic_extra/ann_train.py | 4 ++-- ann_logistic_extra/logistic_softmax_train.py | 4 ++-- ann_logistic_extra/process.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ann_logistic_extra/ann_train.py b/ann_logistic_extra/ann_train.py index 02c5ac4c..15710e76 100644 --- a/ann_logistic_extra/ann_train.py +++ b/ann_logistic_extra/ann_train.py @@ -48,8 +48,8 @@ def predict(P_Y_given_X): def classification_rate(Y, P): return np.mean(Y == P) -def cross_entropy(T, pY): - return -np.mean(T*np.log(pY)) +def cross_entropy(Y, pY): + return -np.sum(Y * np.log(pY)) / len(T) # train loop diff --git a/ann_logistic_extra/logistic_softmax_train.py b/ann_logistic_extra/logistic_softmax_train.py index 
4406626b..94874f14 100644 --- a/ann_logistic_extra/logistic_softmax_train.py +++ b/ann_logistic_extra/logistic_softmax_train.py @@ -44,8 +44,8 @@ def predict(P_Y_given_X): def classification_rate(Y, P): return np.mean(Y == P) -def cross_entropy(T, pY): - return -np.mean(T*np.log(pY)) +def cross_entropy(Y, pY): + return -np.sum(Y * np.log(pY)) / len(Y) # train loop diff --git a/ann_logistic_extra/process.py b/ann_logistic_extra/process.py index 785755b7..0048f9e0 100644 --- a/ann_logistic_extra/process.py +++ b/ann_logistic_extra/process.py @@ -21,7 +21,7 @@ def get_data(): # df.head() # easier to work with numpy array - data = df.values + data = df.to_numpy() # shuffle it np.random.shuffle(data) From 5c08159b1f61a550a5501075b617fc2e1f9a4210 Mon Sep 17 00:00:00 2001 From: User Date: Sun, 1 Jan 2023 20:11:28 -0500 Subject: [PATCH 285/329] check --- nlp_class2/neural_network2.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/nlp_class2/neural_network2.py b/nlp_class2/neural_network2.py index 573dd9e9..159dc571 100644 --- a/nlp_class2/neural_network2.py +++ b/nlp_class2/neural_network2.py @@ -96,18 +96,23 @@ def softmax(a): # # original: W1 = W1 - lr * inputs.T.dot(dhidden) # VxN NxD --> VxD # fastest way + W1_copy = W1.copy() np.subtract.at(W1, inputs, lr * dhidden) - # test this - # i = 0 - # for w in inputs: # don't include end token - # W1[w] = W1[w] - lr * dhidden[i] - # i += 1 - # vs this + # W1_test = W1_copy.copy() # oh_inputs = np.zeros((n - 1, V)) # oh_inputs[np.arange(n - 1), sentence[:n-1]] = 1 - # W1 = W1 - lr * oh_inputs.T.dot(dhidden) + # W1_test = W1_test - lr * oh_inputs.T.dot(dhidden) + # assert(np.allclose(W1_test, W1)) + + # vs this + # W1_test = W1_copy.copy() + # i = 0 + # for w in inputs: # don't include end token + # W1_test[w] = W1_test[w] - lr * dhidden[i] + # i += 1 + # assert(np.allclose(W1_test, W1)) # keep track of the bigram loss # only do it for the first epoch to avoid redundancy From 62afaad08110ae7c8440153ec22fc02ce31f0117 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 15 Feb 2023 02:22:13 -0500 Subject: [PATCH 286/329] update --- unsupervised_class/kmeans_mnist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsupervised_class/kmeans_mnist.py b/unsupervised_class/kmeans_mnist.py index fd7ca76e..b399afab 100644 --- a/unsupervised_class/kmeans_mnist.py +++ b/unsupervised_class/kmeans_mnist.py @@ -16,7 +16,7 @@ import numpy as np import pandas as pd import matplotlib.pyplot as plt -from kmeans import plot_k_means, get_simple_data +from .kmeans import plot_k_means, get_simple_data from datetime import datetime def get_data(limit=None): From 95bed3cbec89a130bcc945347848907d7f9ae5ab Mon Sep 17 00:00:00 2001 From: User Date: Tue, 21 Feb 2023 23:48:13 -0500 Subject: [PATCH 287/329] calculus --- README.md | 3 +++ calculus/WHERE ARE THE NOTEBOOKS.txt | 3 +++ calculus/extra_reading.txt | 2 ++ 3 files changed, 8 insertions(+) create mode 100644 calculus/WHERE ARE THE NOTEBOOKS.txt create mode 100644 calculus/extra_reading.txt diff --git a/README.md b/README.md index 2d2cdd1c..5a104fcc 100644 --- a/README.md +++ b/README.md @@ -87,6 +87,9 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== +Math 0-1: Calculus for Data Science & Machine Learning +https://deeplearningcourses.com/c/calculus-data-science + Data Science & Machine Learning: Naive Bayes in Python https://deeplearningcourses.com/c/data-science-machine-learning-naive-bayes-in-python diff --git a/calculus/WHERE ARE THE 
NOTEBOOKS.txt b/calculus/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..4b0a3f50 --- /dev/null +++ b/calculus/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/calculus/extra_reading.txt b/calculus/extra_reading.txt new file mode 100644 index 00000000..404cc6d0 --- /dev/null +++ b/calculus/extra_reading.txt @@ -0,0 +1,2 @@ +Calculus: Early Transcendentals +https://amzn.to/3Kwmabe \ No newline at end of file From 92633f0c8dd519d728898dfd6b2307a765a4ee57 Mon Sep 17 00:00:00 2001 From: User Date: Wed, 15 Mar 2023 02:31:09 -0400 Subject: [PATCH 288/329] update --- unsupervised_class3/bayes_classifier_gmm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsupervised_class3/bayes_classifier_gmm.py b/unsupervised_class3/bayes_classifier_gmm.py index b129c522..f3c7dd0f 100644 --- a/unsupervised_class3/bayes_classifier_gmm.py +++ b/unsupervised_class3/bayes_classifier_gmm.py @@ -28,7 +28,7 @@ def fit(self, X, Y): print("Fitting gmm", k) Xk = X[Y == k] self.p_y[k] = len(Xk) - gmm = BayesianGaussianMixture(10) + gmm = BayesianGaussianMixture(n_components=10) gmm.fit(Xk) self.gaussians.append(gmm) # normalize p(y) From aba65a1819ae84940614d1523ed56b59e68499bc Mon Sep 17 00:00:00 2001 From: User Date: Mon, 27 Mar 2023 02:00:28 -0400 Subject: [PATCH 289/329] update --- chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt | 3 +++ chatgpt_trading/extra_reading.txt | 5 +++++ 2 files changed, 8 insertions(+) create mode 100644 chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt create mode 100644 chatgpt_trading/extra_reading.txt diff --git a/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt b/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..8d29101d --- /dev/null +++ b/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the code" lecture very carefully! + +Please watch it again, and follow the instructions. 
\ No newline at end of file diff --git a/chatgpt_trading/extra_reading.txt b/chatgpt_trading/extra_reading.txt new file mode 100644 index 00000000..59a5da5d --- /dev/null +++ b/chatgpt_trading/extra_reading.txt @@ -0,0 +1,5 @@ +ARIMA (for mean reversion) +https://deeplearningcourses.com/c/time-series-analysis + +Financial Engineering +https://deeplearningcourses.com/c/ai-finance \ No newline at end of file From 1b49c9d7596a405410167e1163c04e2f9f9ac2d7 Mon Sep 17 00:00:00 2001 From: User Date: Mon, 27 Mar 2023 02:26:45 -0400 Subject: [PATCH 290/329] update --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 5a104fcc..1e1737a9 100644 --- a/README.md +++ b/README.md @@ -87,6 +87,9 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== +Financial Analysis: Build a ChatGPT Pairs Trading Bot +https://deeplearningcourses.com/c/chatgpt-pairs-trading + Math 0-1: Calculus for Data Science & Machine Learning https://deeplearningcourses.com/c/calculus-data-science From 1ba92b9ffc392138fb33f29f41b399c225c05d8c Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 20 Jul 2023 01:42:06 -0400 Subject: [PATCH 291/329] update --- linear_algebra/WHERE ARE THE NOTEBOOKS.txt | 3 +++ linear_algebra/extra_reading.txt | 6 ++++++ 2 files changed, 9 insertions(+) create mode 100644 linear_algebra/WHERE ARE THE NOTEBOOKS.txt create mode 100644 linear_algebra/extra_reading.txt diff --git a/linear_algebra/WHERE ARE THE NOTEBOOKS.txt b/linear_algebra/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..4b0a3f50 --- /dev/null +++ b/linear_algebra/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,3 @@ +If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! + +Please watch it again, and follow the instructions. \ No newline at end of file diff --git a/linear_algebra/extra_reading.txt b/linear_algebra/extra_reading.txt new file mode 100644 index 00000000..865e98be --- /dev/null +++ b/linear_algebra/extra_reading.txt @@ -0,0 +1,6 @@ +Introduction to Linear Algebra by Gilbert Strang +https://amzn.to/2G3bvW1 + +Still Don't Understand Gravity? 
This Will Help +- this is included not because it's about calculus, but because it's yet another educator explaining why practice is important +https://www.youtube.com/watch?v=cP2uVarXi1A \ No newline at end of file From d2c4651859248879204faf58f89370f8ed6b8182 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 8 Aug 2023 23:26:11 -0400 Subject: [PATCH 292/329] update --- kerascv/imagenet_label_names.json | 1000 +++++++++++++++++++++++++++++ 1 file changed, 1000 insertions(+) create mode 100644 kerascv/imagenet_label_names.json diff --git a/kerascv/imagenet_label_names.json b/kerascv/imagenet_label_names.json new file mode 100644 index 00000000..37eeb166 --- /dev/null +++ b/kerascv/imagenet_label_names.json @@ -0,0 +1,1000 @@ +["tench", +"goldfish", +"great white shark", +"tiger shark", +"hammerhead shark", +"electric ray", +"stingray", +"cock", +"hen", +"ostrich", +"brambling", +"goldfinch", +"house finch", +"junco", +"indigo bunting", +"American robin", +"bulbul", +"jay", +"magpie", +"chickadee", +"American dipper", +"kite", +"bald eagle", +"vulture", +"great grey owl", +"fire salamander", +"smooth newt", +"newt", +"spotted salamander", +"axolotl", +"American bullfrog", +"tree frog", +"tailed frog", +"loggerhead sea turtle", +"leatherback sea turtle", +"mud turtle", +"terrapin", +"box turtle", +"banded gecko", +"green iguana", +"Carolina anole", +"desert grassland whiptail lizard", +"agama", +"frilled-necked lizard", +"alligator lizard", +"Gila monster", +"European green lizard", +"chameleon", +"Komodo dragon", +"Nile crocodile", +"American alligator", +"triceratops", +"worm snake", +"ring-necked snake", +"eastern hog-nosed snake", +"smooth green snake", +"kingsnake", +"garter snake", +"water snake", +"vine snake", +"night snake", +"boa constrictor", +"African rock python", +"Indian cobra", +"green mamba", +"sea snake", +"Saharan horned viper", +"eastern diamondback rattlesnake", +"sidewinder", +"trilobite", +"harvestman", +"scorpion", +"yellow garden spider", +"barn spider", +"European garden spider", +"southern black widow", +"tarantula", +"wolf spider", +"tick", +"centipede", +"black grouse", +"ptarmigan", +"ruffed grouse", +"prairie grouse", +"peacock", +"quail", +"partridge", +"grey parrot", +"macaw", +"sulphur-crested cockatoo", +"lorikeet", +"coucal", +"bee eater", +"hornbill", +"hummingbird", +"jacamar", +"toucan", +"duck", +"red-breasted merganser", +"goose", +"black swan", +"tusker", +"echidna", +"platypus", +"wallaby", +"koala", +"wombat", +"jellyfish", +"sea anemone", +"brain coral", +"flatworm", +"nematode", +"conch", +"snail", +"slug", +"sea slug", +"chiton", +"chambered nautilus", +"Dungeness crab", +"rock crab", +"fiddler crab", +"red king crab", +"American lobster", +"spiny lobster", +"crayfish", +"hermit crab", +"isopod", +"white stork", +"black stork", +"spoonbill", +"flamingo", +"little blue heron", +"great egret", +"bittern", +"crane (bird)", +"limpkin", +"common gallinule", +"American coot", +"bustard", +"ruddy turnstone", +"dunlin", +"common redshank", +"dowitcher", +"oystercatcher", +"pelican", +"king penguin", +"albatross", +"grey whale", +"killer whale", +"dugong", +"sea lion", +"Chihuahua", +"Japanese Chin", +"Maltese", +"Pekingese", +"Shih Tzu", +"King Charles Spaniel", +"Papillon", +"toy terrier", +"Rhodesian Ridgeback", +"Afghan Hound", +"Basset Hound", +"Beagle", +"Bloodhound", +"Bluetick Coonhound", +"Black and Tan Coonhound", +"Treeing Walker Coonhound", +"English foxhound", +"Redbone Coonhound", +"borzoi", +"Irish Wolfhound", +"Italian Greyhound", +"Whippet", 
+"Ibizan Hound", +"Norwegian Elkhound", +"Otterhound", +"Saluki", +"Scottish Deerhound", +"Weimaraner", +"Staffordshire Bull Terrier", +"American Staffordshire Terrier", +"Bedlington Terrier", +"Border Terrier", +"Kerry Blue Terrier", +"Irish Terrier", +"Norfolk Terrier", +"Norwich Terrier", +"Yorkshire Terrier", +"Wire Fox Terrier", +"Lakeland Terrier", +"Sealyham Terrier", +"Airedale Terrier", +"Cairn Terrier", +"Australian Terrier", +"Dandie Dinmont Terrier", +"Boston Terrier", +"Miniature Schnauzer", +"Giant Schnauzer", +"Standard Schnauzer", +"Scottish Terrier", +"Tibetan Terrier", +"Australian Silky Terrier", +"Soft-coated Wheaten Terrier", +"West Highland White Terrier", +"Lhasa Apso", +"Flat-Coated Retriever", +"Curly-coated Retriever", +"Golden Retriever", +"Labrador Retriever", +"Chesapeake Bay Retriever", +"German Shorthaired Pointer", +"Vizsla", +"English Setter", +"Irish Setter", +"Gordon Setter", +"Brittany", +"Clumber Spaniel", +"English Springer Spaniel", +"Welsh Springer Spaniel", +"Cocker Spaniels", +"Sussex Spaniel", +"Irish Water Spaniel", +"Kuvasz", +"Schipperke", +"Groenendael", +"Malinois", +"Briard", +"Australian Kelpie", +"Komondor", +"Old English Sheepdog", +"Shetland Sheepdog", +"collie", +"Border Collie", +"Bouvier des Flandres", +"Rottweiler", +"German Shepherd Dog", +"Dobermann", +"Miniature Pinscher", +"Greater Swiss Mountain Dog", +"Bernese Mountain Dog", +"Appenzeller Sennenhund", +"Entlebucher Sennenhund", +"Boxer", +"Bullmastiff", +"Tibetan Mastiff", +"French Bulldog", +"Great Dane", +"St. Bernard", +"husky", +"Alaskan Malamute", +"Siberian Husky", +"Dalmatian", +"Affenpinscher", +"Basenji", +"pug", +"Leonberger", +"Newfoundland", +"Pyrenean Mountain Dog", +"Samoyed", +"Pomeranian", +"Chow Chow", +"Keeshond", +"Griffon Bruxellois", +"Pembroke Welsh Corgi", +"Cardigan Welsh Corgi", +"Toy Poodle", +"Miniature Poodle", +"Standard Poodle", +"Mexican hairless dog", +"grey wolf", +"Alaskan tundra wolf", +"red wolf", +"coyote", +"dingo", +"dhole", +"African wild dog", +"hyena", +"red fox", +"kit fox", +"Arctic fox", +"grey fox", +"tabby cat", +"tiger cat", +"Persian cat", +"Siamese cat", +"Egyptian Mau", +"cougar", +"lynx", +"leopard", +"snow leopard", +"jaguar", +"lion", +"tiger", +"cheetah", +"brown bear", +"American black bear", +"polar bear", +"sloth bear", +"mongoose", +"meerkat", +"tiger beetle", +"ladybug", +"ground beetle", +"longhorn beetle", +"leaf beetle", +"dung beetle", +"rhinoceros beetle", +"weevil", +"fly", +"bee", +"ant", +"grasshopper", +"cricket", +"stick insect", +"cockroach", +"mantis", +"cicada", +"leafhopper", +"lacewing", +"dragonfly", +"damselfly", +"red admiral", +"ringlet", +"monarch butterfly", +"small white", +"sulphur butterfly", +"gossamer-winged butterfly", +"starfish", +"sea urchin", +"sea cucumber", +"cottontail rabbit", +"hare", +"Angora rabbit", +"hamster", +"porcupine", +"fox squirrel", +"marmot", +"beaver", +"guinea pig", +"common sorrel", +"zebra", +"pig", +"wild boar", +"warthog", +"hippopotamus", +"ox", +"water buffalo", +"bison", +"ram", +"bighorn sheep", +"Alpine ibex", +"hartebeest", +"impala", +"gazelle", +"dromedary", +"llama", +"weasel", +"mink", +"European polecat", +"black-footed ferret", +"otter", +"skunk", +"badger", +"armadillo", +"three-toed sloth", +"orangutan", +"gorilla", +"chimpanzee", +"gibbon", +"siamang", +"guenon", +"patas monkey", +"baboon", +"macaque", +"langur", +"black-and-white colobus", +"proboscis monkey", +"marmoset", +"white-headed capuchin", +"howler monkey", +"titi", +"Geoffroy's spider 
monkey", +"common squirrel monkey", +"ring-tailed lemur", +"indri", +"Asian elephant", +"African bush elephant", +"red panda", +"giant panda", +"snoek", +"eel", +"coho salmon", +"rock beauty", +"clownfish", +"sturgeon", +"garfish", +"lionfish", +"pufferfish", +"abacus", +"abaya", +"academic gown", +"accordion", +"acoustic guitar", +"aircraft carrier", +"airliner", +"airship", +"altar", +"ambulance", +"amphibious vehicle", +"analog clock", +"apiary", +"apron", +"waste container", +"assault rifle", +"backpack", +"bakery", +"balance beam", +"balloon", +"ballpoint pen", +"Band-Aid", +"banjo", +"baluster", +"barbell", +"barber chair", +"barbershop", +"barn", +"barometer", +"barrel", +"wheelbarrow", +"baseball", +"basketball", +"bassinet", +"bassoon", +"swimming cap", +"bath towel", +"bathtub", +"station wagon", +"lighthouse", +"beaker", +"military cap", +"beer bottle", +"beer glass", +"bell-cot", +"bib", +"tandem bicycle", +"bikini", +"ring binder", +"binoculars", +"birdhouse", +"boathouse", +"bobsleigh", +"bolo tie", +"poke bonnet", +"bookcase", +"bookstore", +"bottle cap", +"bow", +"bow tie", +"brass", +"bra", +"breakwater", +"breastplate", +"broom", +"bucket", +"buckle", +"bulletproof vest", +"high-speed train", +"butcher shop", +"taxicab", +"cauldron", +"candle", +"cannon", +"canoe", +"can opener", +"cardigan", +"car mirror", +"carousel", +"tool kit", +"carton", +"car wheel", +"automated teller machine", +"cassette", +"cassette player", +"castle", +"catamaran", +"CD player", +"cello", +"mobile phone", +"chain", +"chain-link fence", +"chain mail", +"chainsaw", +"chest", +"chiffonier", +"chime", +"china cabinet", +"Christmas stocking", +"church", +"movie theater", +"cleaver", +"cliff dwelling", +"cloak", +"clogs", +"cocktail shaker", +"coffee mug", +"coffeemaker", +"coil", +"combination lock", +"computer keyboard", +"confectionery store", +"container ship", +"convertible", +"corkscrew", +"cornet", +"cowboy boot", +"cowboy hat", +"cradle", +"crane (machine)", +"crash helmet", +"crate", +"infant bed", +"Crock Pot", +"croquet ball", +"crutch", +"cuirass", +"dam", +"desk", +"desktop computer", +"rotary dial telephone", +"diaper", +"digital clock", +"digital watch", +"dining table", +"dishcloth", +"dishwasher", +"disc brake", +"dock", +"dog sled", +"dome", +"doormat", +"drilling rig", +"drum", +"drumstick", +"dumbbell", +"Dutch oven", +"electric fan", +"electric guitar", +"electric locomotive", +"entertainment center", +"envelope", +"espresso machine", +"face powder", +"feather boa", +"filing cabinet", +"fireboat", +"fire engine", +"fire screen sheet", +"flagpole", +"flute", +"folding chair", +"football helmet", +"forklift", +"fountain", +"fountain pen", +"four-poster bed", +"freight car", +"French horn", +"frying pan", +"fur coat", +"garbage truck", +"gas mask", +"gas pump", +"goblet", +"go-kart", +"golf ball", +"golf cart", +"gondola", +"gong", +"gown", +"grand piano", +"greenhouse", +"grille", +"grocery store", +"guillotine", +"barrette", +"hair spray", +"half-track", +"hammer", +"hamper", +"hair dryer", +"hand-held computer", +"handkerchief", +"hard disk drive", +"harmonica", +"harp", +"harvester", +"hatchet", +"holster", +"home theater", +"honeycomb", +"hook", +"hoop skirt", +"horizontal bar", +"horse-drawn vehicle", +"hourglass", +"iPod", +"clothes iron", +"jack-o'-lantern", +"jeans", +"jeep", +"T-shirt", +"jigsaw puzzle", +"pulled rickshaw", +"joystick", +"kimono", +"knee pad", +"knot", +"lab coat", +"ladle", +"lampshade", +"laptop computer", +"lawn mower", +"lens cap", +"paper knife", 
+"library", +"lifeboat", +"lighter", +"limousine", +"ocean liner", +"lipstick", +"slip-on shoe", +"lotion", +"speaker", +"loupe", +"sawmill", +"magnetic compass", +"mail bag", +"mailbox", +"tights", +"tank suit", +"manhole cover", +"maraca", +"marimba", +"mask", +"match", +"maypole", +"maze", +"measuring cup", +"medicine chest", +"megalith", +"microphone", +"microwave oven", +"military uniform", +"milk can", +"minibus", +"miniskirt", +"minivan", +"missile", +"mitten", +"mixing bowl", +"mobile home", +"Model T", +"modem", +"monastery", +"monitor", +"moped", +"mortar", +"square academic cap", +"mosque", +"mosquito net", +"scooter", +"mountain bike", +"tent", +"computer mouse", +"mousetrap", +"moving van", +"muzzle", +"nail", +"neck brace", +"necklace", +"nipple", +"notebook computer", +"obelisk", +"oboe", +"ocarina", +"odometer", +"oil filter", +"organ", +"oscilloscope", +"overskirt", +"bullock cart", +"oxygen mask", +"packet", +"paddle", +"paddle wheel", +"padlock", +"paintbrush", +"pajamas", +"palace", +"pan flute", +"paper towel", +"parachute", +"parallel bars", +"park bench", +"parking meter", +"passenger car", +"patio", +"payphone", +"pedestal", +"pencil case", +"pencil sharpener", +"perfume", +"Petri dish", +"photocopier", +"plectrum", +"Pickelhaube", +"picket fence", +"pickup truck", +"pier", +"piggy bank", +"pill bottle", +"pillow", +"ping-pong ball", +"pinwheel", +"pirate ship", +"pitcher", +"hand plane", +"planetarium", +"plastic bag", +"plate rack", +"plow", +"plunger", +"Polaroid camera", +"pole", +"police van", +"poncho", +"billiard table", +"soda bottle", +"pot", +"potter's wheel", +"power drill", +"prayer rug", +"printer", +"prison", +"projectile", +"projector", +"hockey puck", +"punching bag", +"purse", +"quill", +"quilt", +"race car", +"racket", +"radiator", +"radio", +"radio telescope", +"rain barrel", +"recreational vehicle", +"reel", +"reflex camera", +"refrigerator", +"remote control", +"restaurant", +"revolver", +"rifle", +"rocking chair", +"rotisserie", +"eraser", +"rugby ball", +"ruler", +"running shoe", +"safe", +"safety pin", +"salt shaker", +"sandal", +"sarong", +"saxophone", +"scabbard", +"weighing scale", +"school bus", +"schooner", +"scoreboard", +"CRT screen", +"screw", +"screwdriver", +"seat belt", +"sewing machine", +"shield", +"shoe store", +"shoji", +"shopping basket", +"shopping cart", +"shovel", +"shower cap", +"shower curtain", +"ski", +"ski mask", +"sleeping bag", +"slide rule", +"sliding door", +"slot machine", +"snorkel", +"snowmobile", +"snowplow", +"soap dispenser", +"soccer ball", +"sock", +"solar thermal collector", +"sombrero", +"soup bowl", +"space bar", +"space heater", +"space shuttle", +"spatula", +"motorboat", +"spider web", +"spindle", +"sports car", +"spotlight", +"stage", +"steam locomotive", +"through arch bridge", +"steel drum", +"stethoscope", +"scarf", +"stone wall", +"stopwatch", +"stove", +"strainer", +"tram", +"stretcher", +"couch", +"stupa", +"submarine", +"suit", +"sundial", +"sunglass", +"sunglasses", +"sunscreen", +"suspension bridge", +"mop", +"sweatshirt", +"swimsuit", +"swing", +"switch", +"syringe", +"table lamp", +"tank", +"tape player", +"teapot", +"teddy bear", +"television", +"tennis ball", +"thatched roof", +"front curtain", +"thimble", +"threshing machine", +"throne", +"tile roof", +"toaster", +"tobacco shop", +"toilet seat", +"torch", +"totem pole", +"tow truck", +"toy store", +"tractor", +"semi-trailer truck", +"tray", +"trench coat", +"tricycle", +"trimaran", +"tripod", +"triumphal arch", +"trolleybus", 
+"trombone", +"tub", +"turnstile", +"typewriter keyboard", +"umbrella", +"unicycle", +"upright piano", +"vacuum cleaner", +"vase", +"vault", +"velvet", +"vending machine", +"vestment", +"viaduct", +"violin", +"volleyball", +"waffle iron", +"wall clock", +"wallet", +"wardrobe", +"military aircraft", +"sink", +"washing machine", +"water bottle", +"water jug", +"water tower", +"whiskey jug", +"whistle", +"wig", +"window screen", +"window shade", +"Windsor tie", +"wine bottle", +"wing", +"wok", +"wooden spoon", +"wool", +"split-rail fence", +"shipwreck", +"yawl", +"yurt", +"website", +"comic book", +"crossword", +"traffic sign", +"traffic light", +"dust jacket", +"menu", +"plate", +"guacamole", +"consomme", +"hot pot", +"trifle", +"ice cream", +"ice pop", +"baguette", +"bagel", +"pretzel", +"cheeseburger", +"hot dog", +"mashed potato", +"cabbage", +"broccoli", +"cauliflower", +"zucchini", +"spaghetti squash", +"acorn squash", +"butternut squash", +"cucumber", +"artichoke", +"bell pepper", +"cardoon", +"mushroom", +"Granny Smith", +"strawberry", +"orange", +"lemon", +"fig", +"pineapple", +"banana", +"jackfruit", +"custard apple", +"pomegranate", +"hay", +"carbonara", +"chocolate syrup", +"dough", +"meatloaf", +"pizza", +"pot pie", +"burrito", +"red wine", +"espresso", +"cup", +"eggnog", +"alp", +"bubble", +"cliff", +"coral reef", +"geyser", +"lakeshore", +"promontory", +"shoal", +"seashore", +"valley", +"volcano", +"baseball player", +"bridegroom", +"scuba diver", +"rapeseed", +"daisy", +"yellow lady's slipper", +"corn", +"acorn", +"rose hip", +"horse chestnut seed", +"coral fungus", +"agaric", +"gyromitra", +"stinkhorn mushroom", +"earth star", +"hen-of-the-woods", +"bolete", +"ear", +"toilet paper"] From 88e9caa789c9df49e5cd593ed84cb24b261f8adb Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 18 Aug 2023 01:49:14 -0400 Subject: [PATCH 293/329] update --- linear_regression_class/systolic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linear_regression_class/systolic.py b/linear_regression_class/systolic.py index b7451837..7d594670 100644 --- a/linear_regression_class/systolic.py +++ b/linear_regression_class/systolic.py @@ -19,7 +19,7 @@ import numpy as np import pandas as pd -df = pd.read_excel('mlr02.xls') +df = pd.read_excel('mlr02.xls', engine='xlrd') X = df.values # using age to predict systolic blood pressure From ed82ac3cc886fc06060ed459bfd528a057256fbc Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 21 Aug 2023 02:57:28 -0400 Subject: [PATCH 294/329] update --- stats/extra_reading.txt | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 stats/extra_reading.txt diff --git a/stats/extra_reading.txt b/stats/extra_reading.txt new file mode 100644 index 00000000..9dc9b858 --- /dev/null +++ b/stats/extra_reading.txt @@ -0,0 +1,2 @@ +The Unbiased Estimate of the Covariance Matrix +https://lazyprogrammer.me/covariance-matrix-divide-by-n-or-n-1/ \ No newline at end of file From 89c3865b4d00ca29dd317c4933cee4809e7260c8 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 2 Sep 2023 21:42:45 -0400 Subject: [PATCH 295/329] update --- cnn_class2/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cnn_class2/extra_reading.txt b/cnn_class2/extra_reading.txt index d68f40bb..28c1a1ae 100644 --- a/cnn_class2/extra_reading.txt +++ b/cnn_class2/extra_reading.txt @@ -11,4 +11,7 @@ Deep Residual Learning for Image Recognition https://arxiv.org/abs/1512.03385 Going Deeper with Convolutions (Inception) -https://arxiv.org/abs/1409.4842 \ No newline at end of 
file +https://arxiv.org/abs/1409.4842 + +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift +https://arxiv.org/abs/1502.03167 \ No newline at end of file From 74ac5e28970745c3329254c9b0f059f5af0402a9 Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 29 Sep 2023 03:46:03 -0400 Subject: [PATCH 296/329] kerascv --- kerascv/extra_reading.txt | 8 ++ kerascv/makelist.py | 10 +++ kerascv/pascal2coco.py | 152 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+) create mode 100644 kerascv/extra_reading.txt create mode 100644 kerascv/makelist.py create mode 100644 kerascv/pascal2coco.py diff --git a/kerascv/extra_reading.txt b/kerascv/extra_reading.txt new file mode 100644 index 00000000..57ebec98 --- /dev/null +++ b/kerascv/extra_reading.txt @@ -0,0 +1,8 @@ +KerasCV List of Models +https://keras.io/api/keras_cv/models/ + +Fast R-CNN (Ross Girshick) +https://arxiv.org/pdf/1504.08083.pdf + +Focal Loss for Dense Object Detection (Lin et al.) +https://arxiv.org/abs/1708.02002 \ No newline at end of file diff --git a/kerascv/makelist.py b/kerascv/makelist.py new file mode 100644 index 00000000..8498fa24 --- /dev/null +++ b/kerascv/makelist.py @@ -0,0 +1,10 @@ +''' +Use this script to generate a list of all XML files in a folder. +''' + +from glob import glob + +files = glob('*.xml') +with open('xml_list.txt', 'w') as f: + for fn in files: + f.write("%s\n" % fn) \ No newline at end of file diff --git a/kerascv/pascal2coco.py b/kerascv/pascal2coco.py new file mode 100644 index 00000000..3ffbd3b8 --- /dev/null +++ b/kerascv/pascal2coco.py @@ -0,0 +1,152 @@ +# adapted from https://blog.roboflow.com/how-to-convert-annotations-from-voc-xml-to-coco-json/ + +import os +import argparse +import json +import xml.etree.ElementTree as ET +from typing import Dict, List +from tqdm import tqdm +import re + + +def get_label2id(labels_path: str) -> Dict[str, int]: + """id is 1 start""" + with open(labels_path, 'r') as f: + labels_str = f.read().split() + labels_ids = list(range(0, len(labels_str))) + return dict(zip(labels_str, labels_ids)) + + +def get_annpaths(ann_dir_path: str = None, + ann_ids_path: str = None, + ext: str = '', + annpaths_list_path: str = None) -> List[str]: + # If use annotation paths list + if annpaths_list_path is not None: + with open(annpaths_list_path, 'r') as f: + ann_paths = f.read().split() + return ann_paths + + # If use annotaion ids list + ext_with_dot = '.' + ext if ext != '' else '' + with open(ann_ids_path, 'r') as f: + ann_ids = f.read().split() + ann_paths = [os.path.join(ann_dir_path, aid+ext_with_dot) for aid in ann_ids] + return ann_paths + + +def get_image_info(annotation_root, extract_num_from_imgid=True): + path = annotation_root.findtext('path') + if path is None: + filename = annotation_root.findtext('filename') + else: + filename = os.path.basename(path) + img_name = os.path.basename(filename) + img_id = os.path.splitext(img_name)[0] + if extract_num_from_imgid and isinstance(img_id, str): + img_id = int(re.findall(r'\d+', img_id)[0]) + + size = annotation_root.find('size') + width = int(size.findtext('width')) + height = int(size.findtext('height')) + + image_info = { + 'file_name': filename, + 'height': height, + 'width': width, + 'id': img_id + } + return image_info + + +def get_coco_annotation_from_obj(obj, label2id): + label = obj.findtext('name') + assert label in label2id, f"Error: {label} is not in label2id !" 
+ category_id = label2id[label] + bndbox = obj.find('bndbox') + xmin = int(bndbox.findtext('xmin')) - 1 + ymin = int(bndbox.findtext('ymin')) - 1 + xmax = int(bndbox.findtext('xmax')) + ymax = int(bndbox.findtext('ymax')) + assert xmax > xmin and ymax > ymin, f"Box size error !: (xmin, ymin, xmax, ymax): {xmin, ymin, xmax, ymax}" + o_width = xmax - xmin + o_height = ymax - ymin + ann = { + 'area': o_width * o_height, + 'iscrowd': 0, + 'bbox': [xmin, ymin, o_width, o_height], + 'category_id': category_id, + 'ignore': 0, + 'segmentation': [] # This script is not for segmentation + } + return ann + + +def convert_xmls_to_cocojson(annotation_paths: List[str], + label2id: Dict[str, int], + output_jsonpath: str, + extract_num_from_imgid: bool = True): + output_json_dict = { + "images": [], + "type": "instances", + "annotations": [], + "categories": [] + } + bnd_id = 1 # START_BOUNDING_BOX_ID, TODO input as args ? + print('Start converting !') + for a_path in tqdm(annotation_paths): + # Read annotation xml + ann_tree = ET.parse(a_path) + ann_root = ann_tree.getroot() + + img_info = get_image_info(annotation_root=ann_root, + extract_num_from_imgid=extract_num_from_imgid) + img_id = img_info['id'] + output_json_dict['images'].append(img_info) + + for obj in ann_root.findall('object'): + ann = get_coco_annotation_from_obj(obj=obj, label2id=label2id) + ann.update({'image_id': img_id, 'id': bnd_id}) + output_json_dict['annotations'].append(ann) + bnd_id = bnd_id + 1 + + for label, label_id in label2id.items(): + category_info = {'supercategory': 'none', 'id': label_id, 'name': label} + output_json_dict['categories'].append(category_info) + + with open(output_jsonpath, 'w') as f: + output_json = json.dumps(output_json_dict) + f.write(output_json) + + +def main(): + parser = argparse.ArgumentParser( + description='This script support converting voc format xmls to coco format json') + parser.add_argument('--ann_dir', type=str, default=None, + help='path to annotation files directory. It is not need when use --ann_paths_list') + parser.add_argument('--ann_ids', type=str, default=None, + help='path to annotation files ids list. It is not need when use --ann_paths_list') + parser.add_argument('--ann_paths_list', type=str, default=None, + help='path of annotation paths list. 
It is not need when use --ann_dir and --ann_ids') + parser.add_argument('--labels', type=str, default=None, + help='path to label list.') + parser.add_argument('--output', type=str, default='output.json', help='path to output json file') + parser.add_argument('--ext', type=str, default='', help='additional extension of annotation file') + args = parser.parse_args() + label2id = get_label2id(labels_path=args.labels) + ann_paths = get_annpaths( + ann_dir_path=args.ann_dir, + ann_ids_path=args.ann_ids, + ext=args.ext, + annpaths_list_path=args.ann_paths_list + ) + convert_xmls_to_cocojson( + annotation_paths=ann_paths, + label2id=label2id, + output_jsonpath=args.output, + extract_num_from_imgid=True + ) + + +if __name__ == '__main__': + main() \ No newline at end of file From 818e2a50e6d779553c4a7c55c074892840ae5cef Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 29 Oct 2023 03:10:12 -0400 Subject: [PATCH 297/329] update --- matrix_calculus/extra_reading.txt | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 matrix_calculus/extra_reading.txt diff --git a/matrix_calculus/extra_reading.txt b/matrix_calculus/extra_reading.txt new file mode 100644 index 00000000..a19af06d --- /dev/null +++ b/matrix_calculus/extra_reading.txt @@ -0,0 +1,2 @@ +The Matrix Cookbook +https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf \ No newline at end of file From 749a3c2963736132fe8c17f87290176324b232f3 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 9 Nov 2023 03:03:57 -0500 Subject: [PATCH 298/329] update --- cnn_class/exercises.txt | 2 +- pytorch/exercises.txt | 2 +- tf2.0/exercises.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cnn_class/exercises.txt b/cnn_class/exercises.txt index 4fdbc856..81a2e5a4 100644 --- a/cnn_class/exercises.txt +++ b/cnn_class/exercises.txt @@ -13,7 +13,7 @@ https://lazyprogrammer.me/course_files/exercises/ecoli.csv CNN https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge -https://lazyprogrammer.me/course_files/fer2013.csv +https://archive.org/download/fer2013_202311/fer2013.csv NLP https://www.kaggle.com/crowdflower/twitter-airline-sentiment diff --git a/pytorch/exercises.txt b/pytorch/exercises.txt index aa364191..6fdee299 100644 --- a/pytorch/exercises.txt +++ b/pytorch/exercises.txt @@ -13,7 +13,7 @@ https://lazyprogrammer.me/course_files/exercises/ecoli.csv CNN https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge -https://lazyprogrammer.me/course_files/fer2013.csv +https://archive.org/download/fer2013_202311/fer2013.csv RNN Find your own stock price dataset! diff --git a/tf2.0/exercises.txt b/tf2.0/exercises.txt index aa364191..6fdee299 100644 --- a/tf2.0/exercises.txt +++ b/tf2.0/exercises.txt @@ -13,7 +13,7 @@ https://lazyprogrammer.me/course_files/exercises/ecoli.csv CNN https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge -https://lazyprogrammer.me/course_files/fer2013.csv +https://archive.org/download/fer2013_202311/fer2013.csv RNN Find your own stock price dataset! 
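For reference, the fer2013.csv file linked in the exercises above (the Kaggle facial-expression dataset used for the CNN exercise) can be read straight into pandas. This is a minimal sketch, not part of the repo, assuming the archive.org URL serves the raw CSV and that the file keeps its usual emotion / pixels / Usage columns:

import numpy as np
import pandas as pd

# archive.org mirror from the exercises.txt files above (assumed to serve the raw CSV)
URL = "https://archive.org/download/fer2013_202311/fer2013.csv"

df = pd.read_csv(URL)  # columns assumed: emotion, pixels, Usage

# each "pixels" entry is a space-separated string of 48*48 = 2304 grayscale values
X = np.array([np.array(s.split(), dtype=np.float32) for s in df["pixels"]])
X = X.reshape(-1, 48, 48)
y = df["emotion"].values  # integer class labels 0..6

print(X.shape, y.shape)

Downloading the file once and reading the local copy is likely faster than fetching it from the URL on every run.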
From 9acd4a46a0f87a624a5250073572c9bf86ac6d67 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 2 Dec 2023 22:42:57 -0500 Subject: [PATCH 299/329] update --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 1e1737a9..cf30fa45 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,9 @@ https://deeplearningcourses.com/c/pytorch-deep-learning https://deeplearningcourses.com/c/deep-learning-tensorflow-2 +**Math 0-1: Linear Algebra for Data Science & Machine Learning** +https://deeplearningcourses.com/c/linear-algebra-data-science + Deep Learning Courses Exclusives ================================ @@ -87,6 +90,15 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== +Math 0-1: Matrix Calculus for Data Science & Machine Learning +https://deeplearningcourses.com/c/matrix-calculus-machine-learning + +Machine Learning: Modern Computer Vision & Generative AI +https://deeplearningcourses.com/c/computer-vision-kerascv + +DeepFakes & Voice Cloning: Machine Learning The Easy Way +https://deeplearningcourses.com/c/deepfakes-voice-cloning + Financial Analysis: Build a ChatGPT Pairs Trading Bot https://deeplearningcourses.com/c/chatgpt-pairs-trading From 569f7584ce2bbaeb9902227603c1c84bb7ce9f75 Mon Sep 17 00:00:00 2001 From: Bob Date: Sat, 2 Dec 2023 22:43:27 -0500 Subject: [PATCH 300/329] update --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index cf30fa45..88841a1f 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,7 @@ https://deeplearningcourses.com/c/deep-learning-tensorflow-2 **Math 0-1: Linear Algebra for Data Science & Machine Learning** + https://deeplearningcourses.com/c/linear-algebra-data-science From 15b924ccda768b5c3210b88fa6b9f45329f4bb57 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 25 Dec 2023 23:49:40 -0500 Subject: [PATCH 301/329] update --- tf2.0/extra_reading.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tf2.0/extra_reading.txt b/tf2.0/extra_reading.txt index d84404a1..a23d273c 100644 --- a/tf2.0/extra_reading.txt +++ b/tf2.0/extra_reading.txt @@ -27,4 +27,10 @@ Inceptionism: Going Deeper into Neural Networks https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html The Loss Surfaces of Multilayer Networks -https://arxiv.org/pdf/1412.0233.pdf \ No newline at end of file +https://arxiv.org/pdf/1412.0233.pdf + +Tensorflow Developer Certificate Installation Guide +https://www.tensorflow.org/static/extras/cert/Setting_Up_TF_Developer_Certificate_Exam.pdf + +Tensorflow Developer Certificate Candidate Handbook +https://www.tensorflow.org/extras/cert/TF_Certificate_Candidate_Handbook.pdf From 05baecdc24ae403994ba3e22e8ff0705ad120fdc Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 17 Apr 2024 20:10:54 -0400 Subject: [PATCH 302/329] update --- openai/extra_reading.txt | 14 ++ openai/replies.json | 206 ++++++++++++++++++++++++++++++ openai/robots_playing_soccer.jpeg | Bin 0 -> 140347 bytes 3 files changed, 220 insertions(+) create mode 100644 openai/extra_reading.txt create mode 100644 openai/replies.json create mode 100644 openai/robots_playing_soccer.jpeg diff --git a/openai/extra_reading.txt b/openai/extra_reading.txt new file mode 100644 index 00000000..4d413ff9 --- /dev/null +++ b/openai/extra_reading.txt @@ -0,0 +1,14 @@ +How to Set Environment Variables Permanently in Windows, Linux, and Mac +https://lazyprogrammer.me/how-to-set-environment-variables-permanently-in-windows-linux-and-mac/ + +How to make your completions outputs 
consistent with the new seed parameter +https://cookbook.openai.com/examples/reproducible_outputs_with_the_seed_parameter + +What is Temperature in NLP / LLMs? +https://medium.com/@lazyprogrammerofficial/what-is-temperature-in-nlp-llms-aa2a7212e687 + +Large Language Models are Zero-Shot Reasoners (CoT) +https://arxiv.org/abs/2205.11916 + +Chain-of-Thought Prompting Elicits Reasoning in Large Language Models +https://arxiv.org/abs/2201.11903 \ No newline at end of file diff --git a/openai/replies.json b/openai/replies.json new file mode 100644 index 00000000..27d6761c --- /dev/null +++ b/openai/replies.json @@ -0,0 +1,206 @@ +[ + { + "review": "(1) His answers are sometimes antagonistic but the guy wants us to think by ourselves. I feel he was guided by questions from students with very little background on the subject. (2) Links are not updated. I understand not updated them on the videos, but on the git repository and the scripts, it should have them updated. (3) Explanations are great, with a few inconsistencies when compared to Gemini.google.com understanding. (4) The course content in general is great.", + "response": "(1) I think all students should respect other students taking this course. All students deserve to have their questions answered, and no student can expect to have this course personalized to their own background.\n\n(2) This is incorrect, and you were already instructed on the Q&A to ensure you were looking at the correct repository with the most up-to-date files.\n\n(3) LLMs are known for hallucinating, and their output cannot be trusted, especially if you don't know what you're doing. Instead, you should be using the Q&A to rectify these issues, which is why it's the #1 rule in 'how to succeed in this course'." + }, + { + "review": "You should have explained in the introduction video that, you have not yet figured out Stock forecasting, so explaining your (thoughts or beliefs or work) in this course. But marketing in great way , nothing in content other than playing with data.", + "response": "Try paying attention and understanding the course. If you still believe there's some magic algorithm to perfectly predict stock prices and I \"just haven't figured it out yet\", you clearly weren't listening, know absolutely nothing about finance, and hence, spreading misinformation to readers." + }, + { + "review": "I'm really disappointed. The last update of your codes was 9 years ago. Nothing is running. I tried file after file - nothing works. I don't think you could even continue to sell these courses. Unfortunately, I can no longer cancel the current course, but I will cancel the next course. Your courses do not meet basic standards. Too bad.", + "response": "> The last update of your codes was 9 years ago.\n\nNOTE: Our friend here just doesn't know how to use Git properly. My ML Github repo was CREATED 9 years ago (long before I even started making courses)." + }, + { + "review": "nao é claro ainda como fazer o donlow de githum", + "response": "Thanks for the feedback! Please re-watch the lecture \"Where to get the code / notebooks\" carefully and follow the instructions. It clearly states 5 times (yes, that many times) that the notebooks are not on Github, but rather, are accessed via the code link." + }, + { + "review": "It is a good course about RL, as all his courses, but if you are here for the Trading agent, don't buy it... very basic code and does not really work.", + "response": "Thanks for your feedback! 
However, it seems you are basing your rating around your own misconceptions and naivete surrounding trading and finance, rather than the quality of the course itself." + }, + { + "review": "Need to explain more about the topic. CNN is more theoretical in the course than programming.", + "response": "Incorrect. There are equal parts theory and programming. Every \"theoretical\" concept is implemented in code. And obviously, you must understand \"what\" you are coding before you code it. Please pay attention to improve your understanding, thanks!" + }, + { + "review": "The content is ok but the links between videos and sections aren't always obvious. I like the fact that it goes in dept on my subjects but the quality of the audio isn't always good enough. I would still recommend to someone that really want to have a better understanding of AI/ML or specifically logistic regression but expect some extra reading if you really want to understand all the concepts well.", + "response": "You should probably revisit the \"introduction and outline\" of the course if you've forgotten the structure of the course. There's only \"extra reading\" if you do not sufficiently meet the prerequisites." + }, + { + "review": "I expected some coding and practice but most of the course till now is just theory", + "response": "That's incorrect, and this is even visible from simply looking at the lecture titles. There are equal parts theory and code. First we discuss \"what\" we will code, then we code it. Obviously, you can't write code without knowing what you're coding first... Please pay attention to improve your understanding, thanks!" + }, + { + "review": "It is all over the place ... not very structured and i have IT and python background , still finding difficult to follow ... i wonder how the Deep learning course will be", + "response": "Unclear why you're having trouble following the structure of such a simple short course. There are only 4 sections: Numpy, Matplotlib, Pandas, and Scipy. Please pay attention to improve your understanding, thanks!" + }, + { + "review": "Content is good so far, but lecturer seems hung up on the behaviors of the participants which is not helping the instruction time.", + "response": "It's important to remember that the course isn't customized for you individually. Therefore, it should be expected that common problems that afflict a non-trivial number of students will be addressed." + }, + { + "review": "Explanation is not clear as we are beginners. May be improve better to understand clearly.", + "response": "Thanks for the feedback! Please ensure you meet the prerequisites as listed twice in the course description and several more times in lectures such as \"how to succeed in this course\"." + }, + { + "review": "Much was great, some frequently explained by referring to other courses as well as skipping some code blocks or instrumental variables, which were readily explained in chat gpt by asking if to add copious explanatory comments. Most, but certainly not all AI ML course instructors are more concerned with taking the time to explain finer details of the code. Early parts on ANN and CNN were excellent in presentation although this was simpler material. In the course was more presenting and explaining than teaching how to code the models, both necessary individually and together sufficient for a solid learning experience. 
Perhaps sacrifice some optional topics for more time indepth to the course essentials - quantity versus quality.", + "response": "Unclear whether you wrote this comment for the wrong course, as there are no sections about ANNs or CNNs in this course... Furthermore, this course is not about in-depth theory - please check the course description and prerequisites, thanks!" + }, + { + "review": "Some more details about math equations could be added", + "response": "Thanks for the feedback! Please ensure you meet the prerequisites as listed twice in the course description and several more times in lectures such as \"how to succeed in this course\". Furthermore, you may want to read the course description that explains that this course is about Tensorflow 2, not the math behind deep learning. I already have courses on that, so check those out instead." + }, + { + "review": "Teaches only Syntax and not any ML or theory behind how any of the Neural Network architectures work.", + "response": "Incorrect. This course gives an overview of theory, and furthermore, I already have 15+ in-depth DL courses that go into the math behind DL. Luckily, this is all in the course description, which I'm sure you've diligently read. ;)" + }, + { + "review": "It was a good match for my current abilities. I could not access the live python notebooks or links used. I would have appreciated solutions to the end of section exercises. Overall though the instructor is very knowledgeable and the course is free so I can't complain.", + "response": "The key is to follow the instructions. All notebooks and exercise solutions are provided, if you can just follow the instructions on how to get them (clicking links). It really is very easy, it just requires paying attention. :)" + }, + { + "review": "The Neural Network part is not very clear", + "response": "This course doesn't talk about neural networks..." + }, + { + "review": "need more examples", + "response": "The course is full of examples, please use the Q&A if you have difficulties understanding them" + }, + { + "review": "Could use a bit more time spent on explaining certain concepts, like Box-Cox, giving more intuition and explanation of why that is useful", + "response": "Please use the Q&A to inquire about your misunderstandings, as stated in \"how to succeed in this course\"." + }, + { + "review": "Interesting course with lots of examples and lectures. Although some parts of the course become repetitive. There are lectures where he explains the code step by step and then goes on to repeat the same thing in the \"... in Python\" lectures. It would have been nice if he had proposed other exercises with a different dataset than the lectures, even though he does not provide the solution. It is fine to say that we should try to write the code ourselves first and then check the solution, but when this is reduced to copy and paste from the previous lecture it seems ridiculous to me.", + "response": "This is a practical course, meaning that you are shown the code and can do with it what you wish (there's no way to \"exercise\" writing library code without first being shown the syntax, which doesn't make any sense). Additionally, repetition is a research-backed learning technique: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8442015/ Furthermore, please remember that the course is not customized to you. Therefore, if you personally choose to avoid repetition, you are free to do so, nobody is stopping you." 
+ }, + { + "review": "I enjoyed the author explanation on the differences between statistics regression and ML regression content. But I think the lecture on the differences was pedantic and unnecessary even. The content list and summary to this lecture is where the distinction could be made. Instead, it was spent ranting within the lecture, how does that provide any value and meaningful experience? Worry not about the review, if the content is good, the review would come.", + "response": "> But I think the lecture on the differences was pedantic and unnecessary even.\n\nPlease remember, the course is not customized for you personally, it's for all students and therefore addresses common student mistakes, irrespective of your personal needs. Obviously, I have to answer ALL student questions, whether or not you personally find the question/answer helpful." + }, + { + "review": "The instructor seems inadequately prepared for each segment of the course. There is a noticeable absence of demonstrations that are essential for a comprehensive understanding of the material. Furthermore, there is a disproportionate emphasis on paid websites over free alternatives, raising concerns about potential conflicts of interest due to affiliate marketing associations with these paid platforms.", + "response": "Everything is demonstrated in Colab notebooks. I challenge you to list specific examples instead of vague accusations like \"inadequately prepared\" and \"noticeable absence of demonstrations\".\n\n> disproportionate emphasis on paid websites over free alternatives\n\nThat is merely what you are paying attention to and triggered by. Most tools in the course are free or have free alternatives. Furthermore, I don't choose what's free, they are not my companies, so I am unsure why you think this is my fault...?" + }, + { + "review": "I have taken this course and have spent lot of hrs and can tell that, this 12 hrs course will take lot of time to understand and complete (150 hrs). This can be done if we have 100% free time for 4 weeks. Good thing is if you have little idea of Calculus and we flow what is being taught, this course will make us ready to understand ML. Having said that the instructor should again go back and try to minimize the un-necessary or repetitive content.", + "response": "There is no unnecessary or repetitive content, and if you believe there is, you should use the Q&A to clear up any misunderstandings as instructed in the \"how to succeed in this course\" video." + }, + { + "review": "Content is inconsistent in difficulty. Instructor is not good at explaining highly complicated topics that require a lot of Mathematics.", + "response": "Thanks for the feedback! Please ensure you meet the prerequisites as listed twice in the course description and several more times in lectures such as \"how to succeed in this course\". Please recall that each section is denoted as \"beginner\", \"intermediate\", or \"advanced\" (as stated in the intro). One must choose according to their own skill level. This obvious lack of attention explains why you are having trouble understanding the course (since clearly, even the basic info in the intro was beyond your understanding)." + }, + { + "review": "you are READING the slide nicely.", + "response": "Incorrect. The slides provide a summary of the spoken audio, like all courses on this site, all other video-based courses, and all presentations around the world. I'm amused that you would act \"surprised\" by such a normal occurrence... 
The slides are not being \"read\", and anyone taking the course can confirm this fact readily, so this comment is both inaccurate and misleading to potential students." + }, + { + "review": "There are some good topics explained, but lots of tutorials are just off topic (not about NLP), but various attempts from the instructor the explain to others why they're not able to understand stuff.", + "response": "You just have to remember that the course is not customized for you, it's for everyone. The \"FAQ\" section (meaning \"frequently asked questions\") answers questions other students are asking. It's unkind to suggest that I shouldn't answer questions from your fellow students." + }, + { + "review": "It doesn’t make sense to have to make an account outside of Udemy to then have to read an article that gives an analogy about \"burgers\" just to then inform you that you have to wait 31 days to view the material. Just upload the video like every other instructor and inform us that you have other material. Don’t force us to the material.", + "response": "Incorrect. You don't have to \"make an account outside of Udemy\" to take this course. That is for a different VERSION of the course for students who want to go above and beyond and learn more exciting material, in case they want to learn stuff OUTSIDE the course description [which I'm sure you've diligently read ;)]." + }, + { + "review": "I feel pretty lost, I feel like showing an example of what we're trying to achieve even thought it comes way later in the course would show how the individual parts of the course will play into it.", + "response": "This is what happens when you don't follow the instructions, like meeting the prerequisites or using the Q&A." + }, + { + "review": "Teacher is good at explaining concepts,albeit he has some language problems.", + "response": "English is my only language, but based on your comment alone, it is clear that the language problems may be on your end. I suggest improving in this area to better understand the course, thanks!" + }, + { + "review": "Honestly !! this is highly insufficient material. When we open the books of Machine Learning, we are lost in understanding the mathematical notations. However, this course is teaching Integration way below the levels of Class 12. My comment - It needs improvement", + "response": "Please make sure you read the course description so you understand the purpose of this course. For example, hard integration problems would not serve that purpose." + }, + { + "review": "The course has no depth where the instructor explains an intuition and runs off. The exercises given have no solutions and you have to do it either yourself or you suck and they can't help you! I wouldn't recommend it. Feels more like a refresher course than a course for someone to learn from scratch.", + "response": "> The course has no depth where the instructor explains an intuition and runs off.\n\nIncorrect. Every concept lecture is followed by Python code.\n\n> The exercises given have no solutions\n\nIncorrect. Exercise solutions are in the videos... please pay attention to improve your understanding, thanks." + }, + { + "review": "I feel helped by this course, but I am a bit confused about understanding the Markov model, but in other materials I can smoothly. Thank you for making this class, I hope you are always healthy.", + "response": "Please note that the sections are clearly marked beginner, intermediate, or advanced. 
This is so that you can stick to your level without being confused about material that is too advanced for you." + }, + { + "review": "Thank you for your prompt response. Let's be frank. I'm no novice to this topic, and I took your course hoping to get a fresh perspective. However, I was met with content that seemed hastily put together and felt more like a reference guide rather than a comprehensive educational course. I've previously enrolled in some of your courses, which were of higher quality. My feedback is based on a comparison with your own past materials. I hope you'll take this as an opportunity to review and enhance the course content for the benefit of future students.", + "response": "All algorithms are derived from scratch and based on the prerequisites, it is not a \"reference guide\". It seems strange that someone who is \"no novice to this topic\" would get those confused..." + }, + { + "review": "Too much talking, less content till now", + "response": "It's a video course, I'm not sure how one would avoid talking..." + }, + { + "review": "While this course has multiple sections on how LP believes you should be learning. That time could have been spent reinforcing some of the more difficult concepts with additional examples.", + "response": "This is a common misunderstanding of the appendix/FAQ. It's not \"That time could have been spent reinforcing some of the more difficult concepts with additional examples\". This content doesn't displace any other content." + }, + { + "review": "Therotical only.....No Example......just copy from book and paste it.......read it.....No Implementation...................", + "response": "Incorrect. Everything has been implemented from scratch. Please pay attention to improve your understanding, and please watch the FAQ lecture \"Beginner's Coding Tips\", thanks!" + }, + { + "review": "I haven't proceeded in the course yet but I wouldn't say I liked the instructor's stance on students asking dumb questions.", + "response": "That seems very weird. In the \"how to succeed\" lecture it clearly states that I encourage any and all questions. Why would you disagree with that?" + }, + { + "review": "If you hang out on YouTube probably you find the same information in the same time", + "response": "You can say that about any subject. The real question is, if it's so easy, then why haven't you done so? ;)" + }, + { + "review": "Generally useful but structure of content and direction of course is not always clear. We jump backwards and forwards between methods more than I would like.", + "response": "Each section is devoted to a different \"method\", there's no jumping \"back and forth\" between them..." + }, + { + "review": "I do not think its advanced stuff at all, nevertheless its good.", + "response": "Read the course description to learn what this course is about. In addition, please see the FAQ, which answers questions such as whether this course is for beginners or experts." + }, + { + "review": "There is constant talk about not having to understand the theory but it seems like the maths goes hand in hand with the models so not sure if it is feasible to just learn the code without understanding why you do certain things", + "response": "Because you're not implementing any of that math yourself, only using high level libraries. Please pay more attention to improve your understanding, thanks!" 
+ }, + { + "review": "not get any technical knowledge yet", + "response": "Why not read the course description so that you understand what this course is about?" + }, + { + "review": "I came here to learn industry level but it does not meet my expectations. this course suits you well for beginners because you can learn all the math and coding from scratch.", + "response": "You simply have an incorrect understanding about what constitutes \"industry level\" (hint: you are not at this level)." + }, + { + "review": "there could be hands-on session rather than pre written code . This would help in understanding the logic better.", + "response": "I've instructed you to code by yourself, not to peek at my prewritten solutions. Therefore, you have simply not followed the instructions. You claim to want to be \"hands-on\", yet you haven't even done the hands-on work I've prescribed." + }, + { + "review": "If you are new to AI, don't take this course. Find something else to start with. Most of what I got from this course is exposure to possibilities with recommender systems. It is not the most organized course either", + "response": "Why should you be new to AI? You should meet the prerequisites, as instructed. Furthermore, it's not \"exposure to possibilities\", we are implementing many concrete algorithms. If you're having trouble understanding how the course is organized, it's one algorithm per section. I suggest simply paying more attention. Thanks!" + }, + { + "review": "sometimes a little superficial", + "response": "Thanks for your feedback. Please make sure to READ the course description so you understand what this course is about before taking it, thanks!" + }, + { + "review": "I am an industry data scientist with an academic background in Machine learning, I have done several deep learning projects in my school years, I am taking this as a refresher. But equations don't have an explanation of variables, and what they stand for, the instructor repetitively mentions that if you don't know any equations you are not ready for this course but no one knows an equation, how it is derived, and what all the greek symbols mean right off the bat especially if you are away from macadamia for a few years. If you add additional resources(eg: citation as you should!) we can read and understand your variables, also many textbooks use different notations, so you need to make your description clear. Also, I don't like the tone and rudeness of the instructor. He sounds like a mad professor, this is a recorded video, so take away the anger", + "response": "> But equations don't have an explanation of variables, and what they stand for\n\nThere is literally a lecture titled \"What do all these symbols and letters mean?\", which is actually a review of previously taught explanations (in other words, variables have been defined multiple times in many cases). Perhaps the problem is that you're simply not paying attention...\n\nAt the very least, thank you for making it obvious to the readers here that your claims are unfounded.\n\n> Also, I don't like the tone and rudeness of the instructor.\n\nYes, I know some students don't like being corrected (as above) and construe all corrections as rude because it's impossible for them to be wrong. But how can a teacher do his job if every correction is interpreted as rude?" + }, + { + "review": "They can't send me slide for this course.", + "response": "Incorrect. Slides are available upon request. Simply use the Q&A as instructed in the how to succeed lecture." 
+ }, + { + "review": "The instructor is poor. Read their responses to other negative reviews. Really off-putting. Constantly stating how people are INCORRECT showing the instructor clearly has no ability to take constructive criticism. If only the instructor could A/B test their own responses. Honestly thought it was a good-ish course. The instructor earned a 1-star here.", + "response": "No, \"incorrect\" is used to denote factually wrong statements. It appears you are too emotional, focusing on your feelings (you're offended that I've corrected others) instead of the facts." + }, + { + "review": "Please respect yourself, you must be ashamed of yourself because of this. Peyser", + "response": "Thanks for your feedback! It'd be great if you could provide specifics regarding what you didn't like about the course..." + }, + { + "review": "Mentor is providing slides for video and he is also fooling student to stay 31 on Udemy but after 31 days I am not finding any slides and note from instructor side.", + "response": "Incorrect. Comment speaks for itself really." + }, + { + "review": "This course kind of breezes over the topics, there are colabs used in the videos that we do not have access to and Section 4 is irrelevant to the course, I would rather see my on the topic and learn something than how to install tools that I already know how to do. Section 4 should actually be on his youtube site. There should also be more links to the tools he speaks of or uses, you have to stop the video and actually go google the tools and search for them. I am still trying to find ffmpegexamples.ipynb that is used for a whole segment of the training but there is no access to. Good course but needs a lot of fine-tuning to be better. I hope to see more added to create better content.", + "response": "> there are colabs used in the videos that we do not have access to\n\nIncorrect. Lecture 4 is called \"COURSE RESOURCES\". Any guesses about what this is for?\n\n> Section 4 is irrelevant to the course\n\nInteresting, who should decide what's relevant to the course? Instructor (who understands the course content) or student (who does not)?\n\n> I would rather see my on the topic\n\nThis is not even a coherent sentence.\n\n> There should also be more links to the tools he speaks of or uses, you have to stop the video and actually go google the tools and search for them\n\nAgain, no you do not. This is what happens when you don't pay attention.\n\n> I am still trying to find ffmpegexamples.ipynb\n\nIf you paid attention, you would have already found it.\n\n> Good course but needs a lot of fine-tuning to be better. I hope to see more added to create better content.\n\nNo, you just need to follow the instructions and use the Q&A to fix your misunderstandings. I don't see how it could be any simpler." 
+ } +] diff --git a/openai/robots_playing_soccer.jpeg b/openai/robots_playing_soccer.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..547761aee6c3983fea1e968b46bbeded658495f3 GIT binary patch literal 140347 zcmb4pWmH>T&~8Wq1d0SH#Ty`KDMbqe_uyVy+}*WMpadxHt^tZef#ObacXuf6v}mEw z_v5>3-F1K8IqUqIwf8!+_spEtn0LIe;_-_Ru1;7G>F~MLg zOiWBHEG%ps0vsF&1cw+OAD7@cF&Wu&Vp7rL93nV5DLMTATmI_?5J7++!0aFlA^?yG14M-JUoU{>={qqo zo&@lJ1qi?ZVPb)?AvjNJDhvSVf3^Ql4Zr|`z?fLr|1AUXKo|fZ6a;-*xncty9r|{c zI(ess{t;K-zkuy<@lr$7D(`cp)&x4kD1H0j#=4>M;XiH!F8U|=+rk7{bH?GzI;ZH*Vn%rr zs#yNod$;Y;97n!(nNz?2sho*;H+yZ@uAs+SN(83AO%&b6PBsdESMp;xoF=pAI<``R5mW z68-2R8u60XKrs}yzb~{-Pv0RKj$jms!}ncUe)(+p2fZo zaeN1;|EFWr@zB^lUZggyJ7Sfq?P)Royq%0imTA!yU#ou2QO4Qurr)I<>-*}xclOAa3dFT` zBo%fcBI-Xt>J@*szvJ7~lKq9_M}HlHhoxAruHQKfhkikU9d^x8h&ex}Z`hgF&UYPF zF@A)!pN~6C9s3S`@UK+=el$S4?5c3r+0N{0sGsg|Uv}TMM&jtovtERHsxX$Y zD!TMi`C*>nLxEh8-?!Ve>CnvLP}+xmtK|3We{OpB&+KhQz8W-G`iv!Ly?Yp`uxkB1 zGJ0|A?CN&9v@kZ;fF#8C55E(0lSt^r`W2(F!t64me)239J61LG?uTB>ZHLr$hku8z zmyMt4{{F7NHEpwMw8aX4zV|(RU{EzbISg8eMM*S%gCJ{l(Rp<|eoAML_Ae8IXIFl( z9FB+#o5m`M*OxOb_u&*qS8i90{nkI8{`jbOH@@*>yuD4hYasLWD-!6<8KKeY&CXzZ zXwN&r65n-5LwWBMfBI)aHD|7vcUD8-?Xn&aQ7cT zxA|B5y)U(Xz`{_s*@+i~O75yE|> zyQQs#mCyd3$mk1EuqBFM1TYdZks3rXwKk%R0)oLYODFX+v7!sga^_lWO@R;Uq4paE zavtkwk7uljCCQ7}GPPORh`n0e4C`K&G|f;FQ!@1*wHIE?Zr3La?5Tx&K5}v#u7nAP z8PG<_72Iqc2$f-or+18DD#de;u@T>g7Lx&7*mC04c81!WZ+pU9dHW6J@-CH}H?pmY z1C9CAf$y`bCP5o@hM>&BTr5=ZPcO>9I?T1ZLf+>q17+ov;^o@=*%(2+$SobSkg?~PV~$G#3kXR5=#2B+f%z8{0nz4^Dh@;DrskJlC7xcLxK&1scqTI9@0gj z0Aq1f7q()y-(gp%hlOidPh+YsZQ7O3{D>&}ef-5|Swgo<`4|VNcbpD3r#S9{sw}p` z?XyvnX10TpEfpB_Doe<3_aVp+dEB|;-&0?s^L;U(tUv!@gDL&ja=To;=4Ci{a6m7V zDZxL!?_`^AH?e$Va=w9}uEVRm{-Qm-<#%HFt{c`rq7I)=*?g7u2$AFATRRa#u~%!` zw@NF75`0OvIUXsCqBOPC9pCRo<*4FqDnq!R99Fh?+i?2J0f_JZ* zIoRG_p6J%g#J&GgQWx$uCzV-jw_WbR(#DcMlXLbxT=mQ6U?SJiT?6&G*a%2#e) z3sw6&b?AR@(pbZ;VWQm6dee3H_#Z&+^eKc(E)31ZqL!eO3LIUlyz_3jWi+Ek6!(UU{(0Ln|P4}%OIwqroplxOk6@CpvUdtXS2WbLb38k<|2WOfU`YszFAN06 z!A$t?^z5&?1GxG;_iOu?eTBGa3@!UFuW@uLOi3>OYCPj6W9Rhr{93cu2DJMY_dR22 z=WAfvVI>cG+&EaUGjN>$8t?9axBpL7MM?RTTOl|GI27Mw*I}dp*XGdx=dtXVedQ}8 zUCPh5-y0933C81c$!e?VnN+3{EY)Jpam@1cO}nfi1(8dWXrg0KR}#Y63{AC4795_jGR^h-=ftLU0vk8#C6V}ufLeG_4d5X4!76{oqdKj zAUCrirdygMZp01ImE7JoFr{mf=4O`RYn)JJUw}`=Ch?k{ke3y@^@dYGWdp4y|0MI; z5C!LteW)6*p{$gxT76e`EXPzHAosj9@pZv6_N=!UiFTx}hM{Zl=stll=i<*pM|!`l zhp=Zw8NIT>BQ*KW+h#6H7MRR(!VLwk$P8;R3u=}#53)i85)Uvn3C`V7sQkIWBY4n= z!iIjteo^e-de{8#bForYO_b>0nzLmW?++Xm{Q4iStjBchd|Nm)V);mw@E06Q8jcD! 
z=drT=T!cFIP}$o;R+Q<4UjEQ#96U*vz3$+9cbKL$kc zk}qN_YD_wWR$|t{aN%b_+r^34>!EzQ@*I=@8 zku7MZ1J8a4xe}9?p|+sZDfBhsWel!z3AISfB2QKFF$4{yev&&X5&N)Tt3|S_^xCcoKx(5O1RA_$W^j&(nm#KmOw8Xx z>eiPeIU>~>U8?6#-;;vw`SU+iJw2KW$))`r35kYsBfxVU7^a*DhqCPG|W zO$8>Ea|~Ddg0Y{pSsy{689$$4F)CNEOq1p(kt)(j#w+nhEK^3(g~e%zt|03>G01Yav0_Bygy=iA#mDR1ud1tJ7v526fx)N~JU;zg%de@u4bI{Q{&Q>RvBozmzKuCh3B3 zDU>83E(D(U*KCltxEPt(z}YabXi;u?g?O(qB=O8B3r~=f;8ub;q)51gslj$9eyo@u zt3P2OOjL~xW{DQY#n@uWk-4h96s0-b_~=l%Pax-v68L|`+q9ru70byRYeUlr`iQ8i zVQ6n7kW1Z{d(OnJ9gfJ#kesm3Bd9a66^T+Iu3ylHS#TjDe5dyXGn<$@LcZ;Sm*zGz z;RdD7P!XU2lTb))5@9hSWm8RXE>g+c^bFeD9eL2x=r z6wv%%dFY2{Lro3OgP^V9PR(*<$spQN`V6WK=xYlRPo1(r=SaPpX7WRkSZ(?tFo=|B zXrsk6AN3e>I0~GS_93vhOR`)b=j3N2uvuFiVi2-8xv`fg4~>=^g`Ja`6#%o(vYV2x zW*Z+pLujkE&B!1+jCVgo_QtF3E3@V;AcZIC4ebeL#|2~t;Z65L{i{{UFi*jZpbdgP?8BYs;>euY0( z!rC+NRn7DlJ3c?&0<%1>1;qaVB0-_Fufpje4n~3;jRWQw8bdo98}W(qcx|JB`5KAe zrr4G^8v_~`x(i*1Ac5MXUL;U*KDQ<9h|c6PUKw@L1_mu zbej{}S>7`vrvvO_)<@>j>2fa{ak1|`qOp&uY+O>2A>^2_&Ela($>y->4;hk)5Qrj@ zwj$-I@go@CL`eMO6R8U77Ho5Q9J+=wo0K!kc{{u%r*y~WM7b{F7#@;tkdb1wl=5(D ze;zj6wVx;=!qW@0D%@#!Z`@+ZKV5dAEniT`NdB?;x|o@Xf1~@{x0{_MG3p8g%Df6V zLO5A_WNk2^kkXtWZCWRh!BQ_wV(<2Uwu+|Mx0%GF@d~Ih+Coc3O3Ecq(S}YV^ii~y z65N^~8-NsXV;b@_u4RopFP8l-kVzm;b2%<{g*&bF>MFAt|z?QJS1!lf*^ZLjv ztD(xFIqXWZ4rlucRjskD+0#}LIan*_LcGPaDwi(W_7K7(?!#Glah1Zq~r#6lL(li`*ddU;Hk!2zO0E0j46Ycgb7$L|{*x>2(##38@wc1?0UOFKrrBMF>zD?u0@_ZZkQ4dLB zz-1^%$v;$C0OA|?KGg)e*>r^!fG+CcAUNf|7X=yv*5=Ml1~!qe$SAfLwKe7IHqsLKHHq^4Dnir-Y*}n}hVVkdQLCk~%VKFsA&gvD z+d7^m!J<^rX5w552{wzR8at&*2_m=%8DcJ5g>OWuwnL?$uJz^3NU$S8ayXKf9g7sT zG9F}6%3?1M3Ov__GehJeXsk8O8%zAcSXZG{g~AJ^6XPbB`guCx`UV4P(CN%m0$nCQ z!o_hh>?ZL`X*mULPvB3#(2BTZ`W$SYn2CE6@iGN%BxtgFP0WrTnA=aVYDB*){fpia zw6L%50rCa1UfV~PapI%)7_!c-VqN32Vf{N(CZF~zsB1~Go({wm=1XZZT*lIJsBD?M z50o-s`N~4WR(h}r{W?iZCSi(V*({A*Eu_es?I8Vv8TTlOenq;K4?-fNN^m-qyZpxO z$|97F;1wm8+_m9lL$hSZ`fdgoNJM76oCMQm1mxm~X&WZFLnOXPeG$-tLOv0@1l&}& z;rU_;i4cq>ADMVp6@H>bOIl=x%3dR4s3Qi*7{q2d{Hube$F3i9JjJ3YJ!#lwDpFX= zww}n*m_tOuM2pf`gqq0L@)gaBTakrMN@#}(Kf*pqpgJd4Kh3CWA(j@dSD|Xuk4U{i zIpva>ONuT@J0f)D0vR*qS#sg&yDX?2S}dK6^iu>PPW)3kI77%m=my+S^u%s83|fsN zhw&oB{s{J%pDbz1KPAmQGdvPhG!!mm(?mk2Wuqjd+1CUROtwZptFg_Fjv|r@2-=!< zg5!)Ef$bM)Q>7PT*tDS}ww6%29^%A1@=GdIQ6Ot^xh|O$D^^Wmj)2bFsweqzBZ^H? zUWmG2qh7fYHak%IWZa@x(RN~9!21bmO;WP^9ctd9BJEp3(#$v*sd^!R%C;?xxe^%F z8HEigQ$``I2&luM{{VS!L(hT$>V%Qd%p2+DNWI$oF}w9l=?uFih+;*SjIlWI4{$)t z2}A?;4L{(Iw`h!`p??=4{1i#wMGcWMf5Io&rT7uhtd59w+A4XGeU+JkxBQzBGdmP? 
zn{tG^HawQD?q%@l+eJlSXv8K)D?!x+J~_Jb&2Kc84x>MbgcUzQ2aNJt6Wm z*MMaS(iNs4$r1rMSVgjOl6_Ez_5vP%t4yHW-s3#qtawzcGweRlp2Kk4!`^pz{fgTB zjR0Pukx;;x_$W<%g;rZ|N}ZHBZWGHHOx}fHbsZPas7<>?1>gjFvwoB|N2;pqhQMn> zLFQDILzL&~P}THM)`3{$<%=ot70I6sPJ=?@wt?0yxkX8kTIbOZEzZ~(k8FhH?p&5+ zZrH6TkeZxfWsoM6vVu->8b}rnGv+|9MG-wD^+{4kGYn+eF7jfD1E17IWZ5S4IN4CC zaFYTd_5edwkpqq>OmNRUvdpZ1kUnrr>%33`19 zawH&J%P9?!m>|INWJ$^HGKEbomL*9jx*z0U(ODZ1?T@QjSfsW(ADbN2j>qw<%h8>D zfVzxIZH|$!p8=%5c=Kt-iF>GZBW_9~^Tq=lwys&cxS*uf>TdS*i2gj+=IsDT`kzKKyIp)WSWUCRs& zkV9v)AySE6Jr)WfxE{lgU-=Man2QiI<*> z`cWe+TjDtOX%xhX%K<0k(p`Toq7G@bdL2_tj%^lrBU17|XjaBA_vOxt;YQ**PO9Ea zgD0Xi1WGqG>K z8Xwt{@bygfFOs~a!YIw@HY6F)L+P36nI4fcx_q!;XhV{=!U;b;6RWam$knxF1GaTB zXUIuA$i=M3npn6Q!U&h5lNQj%ow34;G$Iqo%1AJFp*WP1*ryS56gJ_OQ6s4X%%XAA zDJ!p#+rvEx-EyGkw~|^@ieR$#FXkmOmzpgLU&)OgN5_e4Y)i-7W5~lmh~DXu+1A)7 zY!XpI7ppIPoS~UNLAw^|aM5}rVs(gJ)OU5XJ5AI{UqW26saPN?&7`qHc6p{n!WMi> zDmR!FYHDD_%6{SGjLYM=CGiuz%pyEh=W@Ew}`=^bBr& z!Kh-2om~g?{9wb8|(Ek8<#5Ua?{f}IkZb&#K1J#-Gd2$D;KBCnDrCS_%SSXVuhYbnEn~EpM zu0E6n#Iq>s8&oQX$Bepy;TyDsDVh>mXz|TUg9C>Wsv;&x5VltbM68-%$l26=4U-<# zu6+jc(ZkWHrptLFNjD`$>Ex0XuNN%A6paZQNJiklkbn`%aDXl3?Q6~OX# zVq!fARJ&xdk+l$oHrc3!#?7YWSzkTD*`SuCKC`d z7Pm?nrYB{ISTv(A>@V@iC{M#y80ZGFumj z+@{exM_&pY6X;6E@sRO0j@YE4MP#1+jK75nC#oKW(Ek9~jS974$!Veb%P2tpCn567@8yQH= zGnP8c`=$!-;CS68Z6?d6dh2AD$-$?jnSU1H1RrNJ$pYITOTffuo6P?J6qR)p)JI8> zs;??I@MZ}iFt#|h!cs1BC(4W~bD*INIEb}=t#CZjzuLy*F@!YO9v=LJ=s?JKb^lRCzLkRd}tFd0dZKbm>*qaiVIQC;g5_ecEY`H|m_lb|Hp2Rv9+8v0ks4nv~ z6J~_~u{AL#G#x=D9t@Ij(D_b`D+Wi18)MapRb@dbj@08xtQr8`c<#{D)UOkFYX_zz;$ z(?fpGVrkJ!tqPgpjTuk7LYSXZz)I06A-)=L>jax=A^f^4A>JheB5Lp;Z*kVi4YW?4 z$$Z$`A?JY-OZyRQLsF-Qz}L2t_9o>$kPFaNTsov$pH!ZQj-fdu$JZ7kJ|bqB!X#ld ziKU|@wnCzub-xcxs+5x<*#mY7R6Dlx;8l#LPh*y`Ka7Sc`&vZdyqMho0HFT>j9e21 z#dbv*m52OM-dCc)U%dN4CPmYZiA0XlBj)fnPf-k-xQ2p8=>v0Qb{wZtA}oK9k~3Wr zZ)3_sJPFDIaM03Gk?(pXWi331p_>y$a@ihW+gunPOuY&PVReNL*_G6rojVYD}!|KUTLi8b%b{C{Re;RADc-9 z{#anX2C`&1@CBr-?jck6Oc%x)F5#m{0n&CLmk2Twe2V2#hPHf!-up_W2&#FQgohDS zgWC%3{s^XL+&1j=`4E+d0dr-7q(Rss*b-G_OuoeSZXkT0v0x^WN#~ZN)NDrO62RFW zS&Dp;+wca*SY|suYEZ&$QaeIhhz!RP^gH_1_=mTa-Qkm(Q|RP>q5l5riYlSG<72xe z%>Eb;VXIBAMYXyU8zLqwW27SDpYjmp$H8_((FI`=d!q--W&KRJmm}q$avYi2V@y&t z63H$}6X-!%V3|=krzmyir>Lr{?({xm!8T7>5qlz9ofC;61;T{j&AG%z_La0qZdyWR zy#mZbz6tP!j^LEwZqSzmGw^DqOKTZy@3H>?;DwBsy^O?@!Qg5m_$VjhN;Ez5W7|n? 
zgtw=Q9c^MUO<1^M1_%kk<529tkH?dbB;exQ+aLzn(Nj>mdoOCwI;3XRot(4_N z%3DL$H8+apA>rcmY*2U=Fm#LO$mxz-WQb|4k1c$?4xwf@y)yTZjh$pe^bhulf6>G8 z5pX$TY91mmX(5#vVIIVp2!S@qya$~S$9cr5!C2Ju=su@UU{t!B`Wryaa4$H3wOh^F`2>*ltPs# z6a7*#)=4f{-+#ep$ZLFt0w|=K7>t8@JS|ek(*GPA=2mSJXv=yZ1l%3tjDW}oc7=K_ibpAS8e2_RVsk3S zd`n>hyRzB@{47Ew5(uFdpCr1RY;F9LYI0l9>qf+_52K8hCIp@4t7x{#)3!vVdE#Tz z(sWT?i|jKV{yZ(b;gpCy>L?@Fi{NzJ9Or_pLk3J$j{_8nt&9?1*y7wWT^5a{BwR1H zbcE=BjQZ?JR&+L$%=y6Esbe8C7@fyEtAQu1QqUpd`V@z~G-J6s1xkmeM%7qRaiVKW z6GVuQWuZs6ulc5DUh0wHPU)!<8YZHdFTrTZ%e?Fx$#gZLWTb_DbRtyfc{ckaN@1}H zcr)8WM6;ay3SEv^-1ztq;g5s(E*LWGe2rbULLq35h`t8bLFdz<)sV(l^$CC6PG9qn z@mdEDuvg{bUz?H~G{F4>qb|d|x%Mr5+X}gr3Ghfi$zaCSw$x9+l7(X%^`Ymv&4wP) z{{T`W13#+;Lr%wZ9*gk^^b(5}6aAk50I@+0-vlUlcrU^dlq|avNc=k`&(+AJONxjd zC3zB7=bG^-?xq1dDX=rSf+shU)9|Cg){Xfg(SYt%Pz5LSUqPb-&nf zbXGS4no=OI2`!x{P2qhwFnm|Fj#&4o7%N$17++Kw2cw97Kg9CzWO#xioPqD`jf$J$ zIYRtj9T(8h-QTp%TW_ z4@nHykjkI);#|c7bY{hlsG3OW;Y6*Mz||)6WT}brL_=Q)i50g2$bP`c)l~uS!V$PB z*CBaC$E6btv_w-*Z_o@Vj{@veyou*rn-x^1!gQYkbr&0a5f`b>8W}xrq&>8CMsX~X zZ)`GVV+Htj(3+Ebu|!opqIIC<6MEK(g7|`bcy_RweF_g+AN}%|`GYI&GI8Lha`tFP z6ncNLvb{Q1efS<(_T)Ey#lid_uMkH3?mR&Sg~U8Sk~YD_j?h)pD8iwB&Hn%zBgFOM zUx-tLeiWc%UrorDOS(_V8V~KUS^BK z_BmqT0anx6A97KIapcsr7@y(!Zs2?_oo3x@e+vg!tDH}W*Tn`OL{V(;?s$x(%U zkZgHwAF=#3bZFmRJRG&|V&V4}2t1702O7w-To{svutfey!bY0Vm$K@9Wa4M+2ESAw zrB8^Skt2cRzienpj5+TSbga2pEXjLELOU8G#0W3U5JmW}<0i!T#u)1sBU$$w7HVAOMUY$%NKIJ$ zvVxW=z5`pJ2};8C1Ne3DWdg6}g{^0g%UaLe))^?b&GEnSq!MhEa z#GEvdTQ%5IffKda~Jy%~s2mV>a#XiuR&#j%Md zWNTe?E}1etedI?jm%Jg{LOlpy33?8%zhrkW+dT*)zZ2m;Bf>mFAg>bq$uGf#K&C0Q zWLLm#4Pj-;lE`;5lYcV{DC8!=Oh~=>lu0Dlm=XMvk#TwRM6V2C8zFwM_Zp=34Kj~@ zS|JcvuS|FO;)ZL{7=7zRZ@ymY@_rw9u^^SZR&u{=C+tHpL@W@Q(0_6gG$HAU0VDm5 zc|!C`vw1%EC`oP^6TZXJ^s%RnNNx6}x(&JS&{x)Ck#K@<(<03YArXs?%NI~**8y2=EEwLiY{Exa#`ch@r z%>BWy2490v>-9q?>yh;G1Bx5aD9QQk5L!P(l1U_!;uyU633!*`6_Rhvljg{N5g^8L zikvR8CE*gnOl;4z${gW0Xl%&IEoGq9{{YxGUtu!ho=`7-l|Ky-c@Kn~Hd`zhv7>rR z7d}VR@7W815<`+cGSMMfFFHaT702+Av3TqG*N7!Wk{JSt^BxfV71Ok6Aoij*#C^y3 z3d9ew(RzfpAE1#4j|=%ni6{7A#Xt5FKmO_2Q$WBt4f=DCzNj?+&FT`{FZ^U?!YH)k;@E?K*FUMXP z%S(}2s%!D%+GOGPlm7r} z9+LDyE0jag9X}og6FspIuOalx$h*~&)xKr)XK0Z+@dAy~Hw_5;F*1}?iDn)pOvwYO zE)bk@$AvRQwsr{zK?8yt_|AwX(B9Zff>SRqJ`>`6Ndyp4AHu(ld7-}!7BA1g55%$d zDBTnCT)G`)SxM}2n!u}G%H?p-r_r>4vT^V!2+XtOO?vY~JhI0Oz@PJvE&l)xID}4I z{5$QAeze4oP%Z4TR@_n+KRG$$Da&>L}*>$e~&-WTodTN zU~;E^$Tcs9L=TTWC&o-Qq2=E(pGF8XR{188w}sui#STX5qFuIX0~Q|f>Qtcm&~I2E zGbUM7j-6pb9VtnK4JhTIH^EwA!#IL(2$-9KS{Eif%M;N+(`T_C{HKpFNe)D$`S>Ed zd;S&4C{pA2Pl@vaT=jgd`e2;n5F zO%Z-xixL~yky-Gv%277k*s7s%29WgnBF590s~QS5;mPmBl0=DtCHR6F9wYdBArOre zB|$6AkmZny^RE}>qCbRx2p~fCC@cJS$o8GPAV^dhQG?`{NZks#-mfLYR6y7rri8in z8ZScr6X)ND;ygpzg3(qqwmqtd9F1gJ-dG~dyWSHfv{_C^y0APeOth zC&-D7l0Ep>mdPY?m$3?N%!|_qlw6~wObMopN9Z9V1c4h<1YfwM8m;K)0^h4pBtFb! zr{XA&GD$DYlzIh-Um0R;x)L_BL_~)X0wtZzB7865eq)36;&tKve7?pu&3EFX#gPO* zleS5cx*ySWYwY3MnZutqz+H9`HC5?RHhtEJj16nq63=axlt%wktQWq4VBV6EHn8$I~Qx<5m~ z6g18mJt4J@;*6+v3nte-M$LCL89zv)X(h2KeVC+KXR-#_X9PHe=BceB<^0K09nZ5? 
z6;*B#$kPeTxCzd+Q0A0Yc^ubEcEt|Ko{0G&GC+tc%92$702P5hl8F&N#yolwTzHgG z>5H^=9EtD3Q7E4a%93A)1WBS1f{_D~5f#}lON2l&PDYst?We$qZ|#UdqX_XmaC6AU z5yAshi090b=f|%Rv1{^(k10^aGH*jfa|(o^QLmRP(cT> zN${14njb`ukjo}C`mw>`Tt9F>qtZ0hE*c8q4?{N8aH$Ps&)Ja*s(Lj7=3JV_4}t|M zTOgTOtHMG@8XO3AOxvY8VnwF-V?GOFcv7TZM0{$ZO%~|=yf#SsvRNX9B$A+&C6QiN zg!H`2@ji9oSD6#=it_W4iCjrCL&<87WFquQFU*?p`AA3-a70e!Xytko=E*!WJ`j4~ zpKxfskfLPv;D*$bNU(ZBS&jsLEv*g9b{i7cm6S=y30XyWj}}2CmGmRn_jPENBwW71 z?Mjx}0Rzzkqx&RYYX&Gzv_3>0yb$p!8I(aNj?dXC3C=!KD?80{3$U| z^3lRv2ES+_=p?!m9*p`u7E}sDZO({#8fzlrXo047i0o`-x6|-JZ0)776OR6YOJdmQ zvtkPeRBW2DFyZQhmd;)dB3z$=Dx*nI{zIBq&K}HZ+LxK?0>=2$fRH z6@3u2)TttgNM2P{Eyl)L_OO zj^Znlu$3JUej4c9Y=R7S{6m~c>G$C^V|PPA1DLFe&oef}WgnP)UrIxoTb0Q7rgJS1 z$Eg%3!aTd!Ag(62ZId`9lk!Qg0<|Gem@mf&7LO3c{Aa{}Es)5fpn@Qi--mz?CFTX)m@HPf6A%`LPYC_RFqLQ0i1=kB230uVNq-s3x=f}>l_Rtg6Dm5kTg$j- z*u&uCf`-gK>nWLp2U`^zp)e8fsgUYUjXhIIPF4PVXmXk8J)s5|Hb>)F{GO0-5qp6n z5tQ&aA#0JFqfdwHv%Y*d#N>oG{{WD%(D+4=7qJrf8Oog5dVR$eXBs!}nG&U4kYi-A zAu91Q?Dysd$bZqTrbDV7r3pC?4G+|_waEtGkD&L12I$1I%!WIg7Hm;cQpKc5r${97 z$P^OR1>jSWW3o9Ry$Ts2c#?cPc3J31N;l$BB>0jfhXfV!N71hmEAazPM(Ra@$?UHi z{tx*jDl$j~)qoKqi5SN7n(IlbHh{0B= zZ$jx(aI#H)V=WcXu139zUBTbdC~%5eAxM%uB@SJZ*w+U@(O93u5qA~DpCYT4LmU{S zSI>*eFATC@J{O861j{6nU{<7F5X~AXzX;!l9w9btlE|VoL=a2yUSsUgj=|w1S?y*w zx>PQvYA?zq$@GL=eJ~|g7=@7iHx0})KzmEe8q1*+c<`ysU!hd*WJ`hTEt6Sn#`Bd# zO}08vkd7@igc45t5Z#5Ah4yoU9w)MA#)v{W=)*!&^pGnuIda06MAYILC!3-3n0d9z z5_A&}`(i0q2?_U4xiPlO`ufM~K0R$AGbMh8Pvn-oX1IPKNcI?PL{k!zr-DAnk?b~( zhzKZzI@&In=b3DQN)_@)YPXSl+be_SQ1Riq=1!2E1~w=w5#f~=QY#Qc9}-5)qf{<1 ze#wL_HcUHAQf&AWb18|+)ElBqdldPp@Q)t6NSfW7k~ZXZU!xT4XM`;JvIv-GX1^$nm)039em!B4NvE?& zCA}QWaNvW#B+&OU?d7pnj>p}SC-xn$%*0Km)={#2H=$dL zqXD$p*PzMZlBDV2ZoL(PYtb8=_`T#~(C|f-zVL?Fqmd7#Yhg}O(qb&l_$P@yND&yx z;JuKjP2sVy>{XA+ks=g%Qv^5Hgd}VwPs9l`_9lnWh24t!K1sMDdJ_9AErg=-X!caX zC5R}9ITG-l8p_FU0$U$6)P&0yES$rUrl4OIFM*AByJK1%p&*o=PX5WM z&_(m;N7!~EGt|o8md5WgiTB9W!DfXh{o+7pw(34Gky~6+}r8&6o0GmdW-n!Ib?7af#ExsJ7X3?IKcCbykHq#^J$ylSURwr}g0o zqG{cd^fMj!bD?9)R7|K83li)g zm&}sPm#+z^k$w2}krF8QB%*w1*O>C3zelncPS#n5-VU6N=VRAh%wL`5#}){!6(;ZGX|Hg@XhsqA0j82IFMu3DuHG zN2)5xP7wG)$tp!wK4M({@zZ5sXx>0&PjUqg7HnyZmF|vLL zSXm##zY=WULu;jd3a?7WT`k4voir&W7LE93v^i3d4b8}!+B5N++6wuY_q z>>{L_`VvNM7-Xu?A~!R{Wj==YE;2a?S(js4q?`?0@UKGF(xip0)43pQ=eWh1^f7Wm zb0qLOeO!nju5%)$l31h3m_fsDVQOBeO*3DkoRC!w2z+XN2HSOFdK@YfVtgC$)ZVsD zdIF8y(-x6`<7367qs5X+WFl=IM19LQHa1FML%&>yh|ROC5bQ4w84RmZ~6yo{01zv9ml^i-9?0c!FcHOgP!`L<+u5it}Vi{Ry%C z$^~kuY=PiTjuwje5~jqtD;^<0*6{F4-U=t0CxTlxdm2!PQbLpTLR7_FJZz_eXCr2W z4Vyod;72J6sjY*yE@~A+n_t3XR&KK{_ZjQYLLov3-0esQynP_;nW!i6gX~eVI8p zThkL%6TVD2B%QKl!GaSR8nxpyWlU@~G^TkPVs9c(KY|q5FXVnU#ElBCtdhCE0*}aq zR|9*)GAV`V&ia}-4T zLs%UP31nqy5kkW7gX;z8oCfnWTf7|y40ei=AIZC~3**9?5d!RPSs6YfW9>2g;!o#e z`Vvmgi6KTuI9VLw(FP2w`@6bY&{n%-Kpeb}Hm1wgv=x*I$ zmzQH?;j0*L@j~uxrxOKQeTAoD6lsV`-ImAE@=9_qO-GcEl}AX761WKPsmDTM28!ku zK$-}3*9-2fGdINu$gK&P{Fvcoh zB%3SrKj}!Cm**I#{X?nD&}Tm|VVgbyjB14_bS{w!bTtfRFgJZ6OvwyqHe^Ka3FiT+}KPI>v#Qq7vGK$k9fw{?N1hh+i zG09&fe2K|530xmkG?xO(2YkVR(NFWM){=*2ME*=TXWWF?{sP}Ih9LrL(2YzO%Ecw<*}e!} zhDsR(HHJHj_XPyRp2S-c%i8|{Xsbq{WOX|JW0*N!A;8nMR|Q?Yh)J|E@o5n7tukXm zUV`Qc{1Ao&ret-J98M%lTpJv?HeM1%8o-zSLt2S;B6=lwipe#@E+Ba=Y=yC*kjP;U zPeHWbz)d?7Jwe44{zm2SR*9}g)R*j(OjE>0+1im4mWbCQPb9<-fkrqPB<6-;H;490 zk*)8niwx5BnVa}ct%{8W8jWM4WP2k209F!lz2s=;r=Y3HoP;ESt_1Ug3$@s!RQe;; zP`!@niD#;?t~wDsK16X>NNV4i!jgeHaU7vKDdDJs`;~_zlE`xWPsA%>-M&S0j-xBF zquSWkQzA8FPqI@pC|szD{=;06b!7OCgz+rd84^X&!wnX}=sK5z8pJd~Ph8~#-VRMS zVa7@3hc5z#;fhoGEmLxq#=VR-?3zr|;Fk%Z3>S@vP#PWXB?|j>3DELFD+)>^1@0z} z${Qh!A>@9W^gdAIEaD8*%QCEuvO(pKZJEd9%A#K;6S6FQ9TOkuS_=gznnYYSM8S}$ 
zmgK*LB)C7AgWV3Z_6Q_>VfZ|r)Xdj6r z_l!=cY{>k(6G?-)LUX=Er5S-XUIpvg%X0`%;un{29f~D3(Tvb9wk;H|J_0^j0|A^& zdsZv#2@tD8O;AgJPC_>$L?>-o3tK)y8%D&N_K8p?oJj_YZ=y@vGyJEY4Ahdgt($BpX|Vm!aCs$rDGvBt_l)59DRp4mw@GkGO-9L z3OR%HT#RIIB#iPyRdSEmv*_g(H{kW*dM;rRF*G!#FimhpWLF8?kdBobk&DpdVOo_6 z_QurC7@Cu!;kLzGx*kKe(xXO0S#p`nW~5(II7iT!mn7v9C2mnUih;Ls(7LgAk}1ik z;N+>YPf6_CM9Rd?CXS$3V}AhVJ1xwi9NkFfQop4m3Qk=KKfOKZ<_LpPIJPZozj*Ol zmUS)BDse2~CETkYFQJIxJ|w3{k$j~}BHA#U30{z&FJBT?1IJf=>zTg#JZH6)JN3ugCsSp1EBAX_(%E=&;2$`Z*SlzoF{@Bxxh%Ex>{ju`f zD3F%ZjFO6acpN$u{4^+vLYb0o362O{nDk4;%O-P@YKn+iE@woYa96ZoxGP~R^e$tu zB6BoJq6__!lTT=u$t}}zB@M|zGuevDFJUdLA|*yh*(!TvF|=rgLY__v*$h)m5WNl; z!8V-d1i6H$m>f$XH03zWD0I)s05PcdQ{zUZ^k}tQ_yrBh;jo&VGnxKWTgej(l2Jz}jnc!w zK^&wpl5P@^x<`oPk_2tqDYY>vEBx7J;#y07b;?w{omxz|s1%=WreI72XYZ2H{g3*{ zNabFJW2eI{9WF#dRfg$ljXi8~EeYHs+8*n^awYaG(LGW&nv&JXiLjbvZDHwivQ*Aq z#Ucc;9Spu-VG3X~EGeaA`#lxpapLYcNd@5K%MI7qpuv zm75Y7996S+@Ghu*lb%Rqn;fwhnSrdZQl(;yz_BF%0KqlC( zvL-Z#$oQ*dw@6Ekky7+KwL{g>6wu}kuO!4$6l|4of?FoV)Z|pAdLk83__7-GiMxJ? zSm(zkdL=6~i0_Gzr8VuyMx?k=O0bRlYNmrKW9^8FF9$knnw*i4Yi?L43jy@ ze1*%xVc$bGQKiWX7!I95AuTkGa%jXJ0FFR$zuhhjR7+!=vOdqCp}WB%J&A?cv58*- zm){2MSpHB^^wEDKzGb2u@dGmK&ZZd^PD*NF(9oxhnMpaU6r!x18J(`h@EQo^PtRst ze44Rdc9oiZHZ1c_Alts+!rv@OpH`qsPuvas;As)>966n;4ivKY3R4Q#1EkB~$}ANz z!FN_6$0r2Hq$AW480X+m(0u*~SdyIgI1=G)-}WKks|?uE{{W|`S|^01$qM9ZlBtpg zah;H&>{K-gs}oZzBJ4`@9hWrF zl7W?!f8!fkro@`|#AD7Qa6!y-crkcv*m@J*sT)7vX2^nOtzoU0l>3tK;9 zy+cEY?5dp&?6Ah(oIQ;_K^EGfY>(gvB+WE-j70O^NF-FHIwo!jB!(L+W?>dgOwO{j zyBC&b97v(uWb9LRnL_(Wt!afvTR#OV6fI4)1mAJ?jT4L}MCHGcMv#Wwr}iYW${mY6 zmeOwn!EA8Vto@mQt-zEkz<$xd)g)0D@;x*(oF>GERws6=kvpD7;n=45B`Vu5>u2;s zj>%CB((MVNUeJEX{=vPUv9vxcki5`OM^zU<`&4d~u8ouGKj4?|0#odehkx`OItizY zawMYi_EwP1QDk)tPi91_hQyI{%v%zKw)q;mhwwGk4@wV2GlPZciPgb8BO_`T3!%AK z*&c}Q-E=$A}2^u?w*W|3@z?a1Xxe90E!J}N?( z_czIQ%T1k)ERjgzH}EQ=NzA>Iu}9J-EVdbN4xm(b7+MWMyoo#ID5fbT1~mLGRvH}a z5m}!Mr33!}5eTvx#u1aenO!s`wXzy)WV|&(2$STjend&a?1ur>5&PLHAcd>wonS*| zh|3C=qIN@wA4|}iFyAd`UcN^edIt$L=zZDQm)4@>(v6)uhJhBC-!BA|bm+RD(;Z2a zrD6zhA{S!1HiZ7eR+M109TbR)u~MCfKR>b4q;@w-NSmrcAeQAba?s9K;4v`VA`&Ki z%xPJO2cx7}7X6IoHeo)BA=`N!M5}^4OB&7%Q5o@cN?H`rvTZH}D>x~>S7YdxQR8t; zWhoAwh>SQR*y^GN_k-k1Qok!Nw3vwAc6#u&`Qx^pg+7 z46Uk5LoBR4GII)uRH_?=G3dRih`oZH8k`g=5IDBmM9ftccoR68OKejqt_iHz{)bAU za48`M$Yu8=*L2$?N~DyNc5z_>)s-zhG8=g+p_FZe<;P_sl3Bsaqea3T=)9m!B~djo zB_NWIBT0v7$tAL15IImc$0z8Ubw(0wt0&|8J^@<-Y&0oY((DyInhM&FYO*=A;ZhZY zt+86fepEfUwWjFwrOAQ`(#N4zQzb?H3u;`L68SPxiOF`!vCiH}xUn~ORu+jYIBk^E zGX`Ru8C@~VB?&GHZ!*bE69|-2Lr^<}aLUSGBsQ~wR?;_lmorXCNt3bE-a10$a%*HO zKSW8kB*N0fVCfR-pMK?x$xI@=M-FY7D3VpB7i7YR$B7VPrGb-zZJUFcAnZjAOuB&V)UPX|Hl*tR0Ne}x(He<#&m5SE_&h}xa3MQVOhR(ivuB?ptq2#(*$f+ zZ!P1YC9Uk+fX+$&M$}Uo^g`KM<&!i`BWn=FmjN=Smq#aKSR%)ZR&=1`SF1OVlX_1E z8e~dd65w2{i(r{s3-BbVMXYP(F|>RPSJ>P=9=?V(pK@SRLP_EV?BzGc#>vpvNoe$n z$18~b$_Dw4Ap>oyP2jalaFUiMGllhXrA>*iEL3NyxV?NTA^u&tyz#SV&1y+k(!s zy3t*8i6$*ay_V}*XG`PB2~?+&^~XI6qFoiS{hxwx{PrZttl^Vp-L@#6)*#@~qOjBC zaSh=VhM%EM+LKcOz72xU%17ugh(x9m;mtBnpwStO~>g~nh@{Fn-dY`AqVk{lnRc3603 zzsW8YNQF}_1l(JJm7%zV+9FJrY-<|j4LCJ~_Byf=J9r*jovjUy*ZDeO8yqC?h;At_ zk+Mn@@IS4NH1naW8?;H?Fyx>Hp#)2+e2rv|kt4BiYQ(aVt8z7~LqWF5fYOU1-ws{L z86DhUI9Dq6G9jB-if^R}Eg{OEq%L=@v_33^h)Ah9sVPw~Gh}DbvwGH4CA|5M-E*mz=jEwDXx7Ie2DA+GJ6d$xJd+l(R%6 z>CS@@Bv!D8nuD!)2)d@hQyIC7iN%ktIkuuPRM=cLUBqPt;B@ChDXq^K2?i__4 zvNG6n;NKz}ix>)RNw#w&^$O0I^pR;Y>xaP%x*)4DIh+iLLK{JU*734A-RPaPAx?9OBuL25^kzLB_|Wu@=RE(aA#Q@;|NLr08=tg5y2~VqKc13r|N|VGNgui z(i?*0%n)y7WaGnGDGfaGhQtFtsU*7W zScVtTk>iGXZ2XxkR3nzQg*etoFUT_mL&}N>8Ml$~zK5`qVc>-_#G#VmxXB*UCfXp0 z!FEEb6Svuu!+nVId>g2Rk}5|H;I}M@MfoAiDVIAEX-mElcABOsi%^9$cV(w=Zv_pz 
z6?o4CCl##`gLy7ilAfW`kpBQxMNmpHDH0o$g*h1f3_BKuDcm+&rkK^lzrl7- z1G$zaASUONCEwLxtgQsPD%HbfPA32@~U zLg8|W(&^-6cU=x7C6v99t&X8emMI~)+60?4kut=#OQQi)LTTv3@9?9Wg*{Xr-@6)1r;E5OqKe@zWNr-yeF;)rByJJZ;fvobJCSMEU9`-daN86$X*lGR)X3bq zMfoyT-X6Iow5Nfhuf&0svxxl;uu8O(*JH;H$(QJ~m?V+%Z3#x*@DRRQ97N0!YT{&0 z9Hd2X9*K(VRFh4slO_psOCj)A1N^X#USC@y-n2C9p$9?)mJQh+(UhzwfnqP%s-_uP z58Px^e>cMLjdu1i%ulwsEX*&-FyUG^ky2&)6Ky4kB@?uBpF)DfkY>vCR-!1Ru!4cj zc_$h%t5!p|H`y&Es_5M13Y_b*rw+zi<0L$=DQ@d?qjbQg4-zTmgVDov1!$4N$c|rT zQ1(Oj1U$}o5|z^6jl?nuYb1!H6l_`i1(t_3X>ux7U3xeibW2)Mx)Vrk@@hIV7F?+h z=_E?7U|G_^3$e0r-61Wt6=|9UkgI~oZ1BfWvrgod)_skrHJ-)nQ_RlFDoH|0M6qCB z^qQ3Y7EoeRS~K)H&%>cY>wFd^#~~*hHHkYtg(~2hamcW?(BTqm}^W9YhqI*@`kfQ8_1@1J@Lqn!6VbD8luvq&uNMsc&P0KjHgfz8; zij>}Mb=D;(vdSbQ$_Y(5?Mrd0%>>? zRw1&{?2cr36r@ohmKYY=6SNh+jBzoq$$7A;bjmPw2<|X0Cm9#oWF~PJB?+XdM#hv& zv?8|~IcSF_BSVcWH;Q&OFY_1JLXyxVonK;V?evTdZz&xbLiqudq;5DF80fT0=WS&OrhwD&K-(UR_T!WxD<)>Y?@bmjoAMH^2(KqLROo|^3uMbp7yUs zjgalqqw>5kkwOlUp4d+>N|j(JeKS(1&_l4pdB+ zR59+ivRM-OiZQ}>BT>ySq0bl^x^DyGOid+DJ_h9o zO+vj3Z`u-6Ga7QZEh9U!S*9q2-`tT65rKVLmFodPQ!4ym+ONTD(xLl1T ztf!+heGLu5f%}ot{!ZO z-8m&Jy7XY{C6i<1k_Qs-Yw}>Z^mIRgxmBShUQ;mQOKu91e4LwCMH^2H!DYxvH$i5*TaB&<@^VTcq7!aLinNKlLM?DBQ$JW9MNtnsu`Ci-?7)hDf~M5GSr|#^jH;#u zEi342t7B<<(UseRTt)<&Eg`kd_KOH(xO|Fq!GUvQMP=}|C^&8KHm$Tg%lUymN#$Tr zO*o=dW<-;?`eRGzvT++4NTzT?KAI8{dFPSA)G8>bE9^mAMi@_HY8Jv!p@)vhN?TzM zX~5!@r!a|Bve&JYXg1reC6aj-@`!Vvri`Be`AgtZb~U8;B9S-9ws5GIM%qY(b5?{r z6g7GkrA<9-&0TPIh*};nQ^7Hb&m&N&5zfUHc?piya%|QHlc`Mp4?&njV}cb_q$G}C zq3s+~X_q);lxjahoD&9wj%V~KgV3>U1X3BkAuN*8rc{bEaP5XK0+T^a;hJ^b_uKg1_#%{O?TbAzv2O7ExGj2G7 zx_Ss(eX`f39V0?cq^X;{43S(nrY9nQBj}F9B3?v@vep?s<+NR5hf-DFCWxhYB{)Nt zdLB({OlMO;Hg#^qj(B8f*O$f&DstG=xoZUAoxCw8U}Dz=J5-kVXvp%}HL;N5aFIxR zdqP@hl5kjGk<4B?B}gaOsg!#-Xe4T0bTzBQj!`fwlauE}-W;W3a=-dCtiv44LMgll*%=k&8CZ5cN~yW6o#Kh zockrUbetY}98Qizfvs6DbYx#6LTY${JO>LQy*>zbf$}~F=&fvu)AU2r&WN7jnr&!U z3>?wM#m_4yOYBMF%Y2E_t*wc?yAE%#+fA(CL#nU9Y0b2heKIKGbtN?I0&S?lp~#oy zYeHXSeWXOsk{($r(f$OP(IHkog%b6$5*p8Cv@sH{hUz4iN3?3kOy43`E`o+KB-8wg z&BHAer-CJR@+g>90wt;z$t+x>DL9VU$rDkrAWjaAjif2-EQ;+sJ_;4PW5I*bz!ryS zRTfe553o%rl6xL96LBzY@u5cdiy1;)=aN~nFeNQQvjh%svJjs6Xi!MTJ0zOZk+rN> z(2$mm=){drGO7&Ai-=`h&)H&X^&?qbhfOk7#Ij=~nQnA9l$Kpb1(v4xB69DRHHYb+ z;ejc*Ms_L3(&BW+zzp=oKdIBOz9t@0rrk+IW}C#xJCiEut> zOcL7-L5b=Ifvn;N<7$XODh<-4*_{dZ>F_pfpCXkGq{pCg4`O(88*0{<161W# z23b=)3*cO3lWG3|!3-?C8joQ91#C$tWJ!i{U^)q=lWTB7+=XtK;*k+zqEMKi=`KXH z`A-C$6ME!odw$H*Y}!s?So8Qsiell4V4+cO7gjZuP&ZRA!74gep=6f}lHVq5hRM*I zL3|0I)t@*dV3+tIHpcjqEv&x?TpozUIT|@0Hchm#DP63N&~idUXl|xi2PUPHk7z?l zQ6YwDF9^lq7S!hQIox0~_E{a4f}?M!nr$m^CX-h7Q&dp2alsAR32dH-lZtSd+~7Jd zoe3*9NKiLX$bwR!Esn#f`zYY{5DjI>aM=wSZZG_#{q6AzF?<~8*NoQ2-Wf!z;E{Q{vlfaWt9P(g{Jt0M4qho+}Whh1C;v+su!K-2hk}Yq6HD!&6O)GR6 z_3qpfl@^@~ib``CGZfy$hGri2Ow28Q32DtXHzP>W;8`uTB5f(9fx!@Mu_W$w=&)%( zkuAfqN>M|%3iAXNFgP4V2VVhz_dJlSTSgem?7*-aQ)@U5gDcuZ$Z8pq{JI(%h+U3J zxkGL9Jp@>2auL|kNSOSKB@28CiKQHlkdl2y+Zfqw{g#fFk3A5hIgBE4Cf#UX6tZ(X zo{MGV*uL5vC#CxnGO+TICHW)S$pxz(nVtojg)XI$Ls%{RtD>3DhF!GK%r@m{MQ!P^ zIkwoKq9rVq!4C#e68R?8OR|pgA}hX6XtKnzN?1ujT#X3fB{APwMDH$#b%M~LCg8f* zmC|^EA!^4?Ga@vIGGFu}8*n*hkshFT>`GEv&{WQoT$wVI**z&4B|3sILK_!gES?LI z$w+Sf5YjZ2E`?wCl7zCPaM_Nq(Tv@@p{JRD*s^*UA>hhWl!WrB4jyFZ8K^>T za7kd#gClWrJ&e*g9U@>)l-EM!oGb}8&^44oRON71blvhKRO7ISXzm2fbl|)=zd~-O z367mUnCYaK!yMT*_FC}ZO%jGglS7gSk`t&R+^J-6I8=oGLAh(_Pl8wEWnfqp7=+)^ z5ijJ)vQuUWyH8CDTZ_ouxBOV09N#SMEB%Qyj}!PCF)0CIlc(Z}pQsIra2$&??srF#rN)YF)U`FSc1{2X(WVo`(+I0|^ z+8biFRRgjuUY>?ppOB>}n;Ld$PeX7vnv-#Ugt&i2=e`L~Nnq%az}F=X3L`?gejuEg zWS_K9#Om5p*>-Co6(r^KE03VkO>shGk{g2}aLDBr75jESTXyOL%gI(!w$v%P-WGGL{K(R(XwzB_fj8 
zu&ro?GHx1J6pcK-a6tkV+F2@G*J3T0G9*dhP0_&hQ~4arPK@GM&myW%(AML^HY^1< zNi@cn&t$BJW0jnt4U+P(!3s(C_Bq`rfYPTvoEVjw6L8W|*CVI)e314ygf;z?xl#jh7;_ zN#YB(o)wO!OTD2-e@v)@m=g4ylG(z;9HjD2Tgy={TwVf_jb`$M&Q2k0qMgQ=+xAXH zm4Xt5wkt|fhlB2l?1a$~CNZ2SS*NC$h?S;ZxFIzDg@b3n>Z@4f!6@KI`*J3W(Ga3T z3`;%&CW?x6=AoMOC*_`!4j7YML0IZ>bWh6e9;#Y-2EMpx7Q|Cd?R67Tn#B(8BJ1zs!i}! zzR0*sof2f zWv2!k(1Y|8F_t}wKbdUC5pKsMh4y7TNx%-p{pOKMDU7jOZlM_(Qx81N5du`A1q#gQ z*a1o~VqYRoQnt}3aQDB*3V0Mn z8le>HL|DpvWT{hWa7j-s=b~IpVgg)_glOZWQc`;xCmH)09KgsZB3fFtBgbYp2%5I| z7KDW4{h>lrWyz8kCe2Z#*p5<_`Wi_y#zfNc2ua(@wuY+vy@@A=^V!Tlz-KVv99fm3 zq?99GBEMps)#WCWB<>8fi-GRrHsFJDu27MMPRYufG6}TZtOT|p@J3pRM3mjS95Ypc zArk{mcC{CM0SvniA>)YIF=_y%tYjQ+`&%zm#HBlj6ak8cA6Wby1HiIdR$-}Q_q|HJ?% z5CH%J0s{d60s{d700RL40096IAu&NwVR3<>5RtLLAkpFR@c-HX2mt{A0Y4Dk{utcC z{n>5<0OZjDYJXlz>SabE-Il~cfn$~T2ERJCoi)IJ*1R7704&P03CI^om-&4T-8q+o zY2Ck53~d=jvOT}YB)Z0F>w;C_Mn*%GMA|72yiCt^ zVJr&$jY%G%mISd{`XI)ZRuq$|OFx$|i5o&tzT-$pNDBv0qA_Qc+cF8Zc$moquG5Fc#P*s@OUtL!$Ft^rDgy5LgwqOrf1m~h?PGA4mGXo54NWU)G{ z#9T*-ei20#P0PWkx;GO>vl_XUeNao$g!F>4Y;}y&<+r7;;sA~vQsxaS1i#NPWY&?Q zpdoN5xQ(h4aK*P%W8s%ek)2x?i&u9p_ZP8YIunfT3n?;C9vI>LC_3>PH9=B{62yXJa_07%UaqoIPh<#`$L ze&vNBYS_^Qwv{P}AnNi1m`7ZI)`u}@PL#|i1%ZPX25~gP7nik|Yen;zW1~c^cs4Oj zw(^gxtP<6+iUy^S#;ztPON!Z%bp#dB)Nv8J+m7dDQ{;}m*Pow}SAF#(v(;%~OJ3zf z@6LhZAt{?KdbU4*Wf6gTfVq<-#GV#h(3ToG6z+1+t6|^aX=$>n2sQ!#03(jdUg{ z%+-7&cH1T7X@JDyy+w^dmF10*!83g_t9){UL=kvvymtnt@o0F3)c{ztWD{t^Q!e~T zC~uGSut61RIdn>{BW)us;M37BFP7i1%D}+R11Z8I(c=^mXhV#{ejx~!m{KL#v=}Rx z#{`c=yU1sXwngq?V9=EdXoBdMP|JL*Ex4qDAL|o8%UWN#t`Ge@_)EaOay{Txa0zem=SPF^thE#E=Xdxc1`By*68*-U|A@vufC!o za-D{%$i+&o;{z;oJ#55Wmtk?j`2kl5u_NX7s**DhCM-`!j9P5%;Un~S1U)cUSN{NoinVH(y{qbiu3LL^4K2Bc9e~jo zljDhH9lSXig^WNrAse$WOdS?7e^a7dOzCCHt@r|*z#{~KzKK|?Ji=Zs25qXC1*?mV zBNy!`tOX>y0#+V+#BsjJ(RUtPg$GSa&}E7{Da=}PxAS3+%Nc04qdv)re>GO;wH7kp zdaTHT(U)X<&M4E+5yhw^D2#Pp@nrW6)<-6z0ivZd@l_$Jq%ufV4cfrs=#+hi^io_D z!mWCgOCM!Vm6YX*9~=a^<~|(2i*#TDc5YEg^5X=D5>yH_lc5dPKCHQh@3`Dwa>67z z`$P$sj_y*mTP!l!bo9mEcnVL(n_N)H)smYKL+dd& zHV=jb<7dl=h{}-d6Vy?|SP(H#Qj)Yo5zN6{zYiQj+!CP>x5u=4*oSKfL<)A2l7HR~ ztafz(V$mvvYV^S%Zp~+K`|tZbGhdF`5}#o5Ag&pL#_5U?kVkO<70)Uoega=-LOp(?^LUn8moG9( zNS!dgmjYMOE%&LSMNNz56#!u-vyc=^GBVA|`z2g~1*bZ=eVCMHd^U&<^6*9r(3Nh| zTyYeS{tn^XMWC8CVp4XJBh()-RJge%c6^!U7ep*Vy^z(6yJg}A1MM=5V=o}g_Kb~~ z>95;ra4bQ*+5(W)!fZ1`CM>Kdt|;kJLDFT&Qwywz01&WCC2z%x`6S@R9fG;6Mf8mB zaY*pqqX@3k7vMIvZr4kNWY%L@=tuJeGjGG21hz0F(Fxy@75MCvE&$H$S zu!|%X(LjwOGuQ>nSzP-{1Oaju!isJueD)IzWGQJ}OGtNO*+3}k31oA%Ft@o!U$|XU zbCWO^Zw7=r%RPz>S+BUgp*j`7 zHJE1u>kFj=E=Hn;0aVB#20IUcLAaTl2ZqBrqoDy*gs06r!h^i}7}#mC!^+Fn+N~mB z2HBA)*%*Tl*gUdgt|5&K+r&5Jm`gK-ES3;?!D_;Qxy%xR4myQMtD_K0lmr_FTO8=7 z_npnXb3kTrSXEkVc=iJ8z6CpuXRxTVc52d;=c%S{}@gxFlbUMKN1LPk#bzu2#A zajxVR5ri1BE;+^E2-k)U0aysN_JcQykp6!I~y?mOXaoXY*MNmT+zzayCTbbr58SWTe#Yw118%9!y^@WVL zZFt&>RYele8Nl(DFedL)OvTvw&I2?NZ(+m zu*E7vvvAeTR%RGmtsnlP8_TZCZLV{0V}y~2g@H)X~HWI!KQT}Tv$;19<) z1WM4&^pRmSXcv*o$17efF}F6V&dA4(S(6bTg``(RHqgcj7_!EEWj`Ek9U-v<$#Si_ ztJsAs>vjwC5n^syATF_nNCQI$HO0_8DL z;A8?B8am8kT+>7(+SwqR7dC4fza-20{{Z1X`TND#gfGx? 
znN)4d5H*_LaG=^!j7))hu_UrPrA2${Y|v7}MqsTFD3smsk5w6@L?_umA{p0m6!u&6 z*`OSW3f>m+i@LQ>WEUh@mc*&cuGbY?xAz`QqpK$qZwYOGwKgs1kJu|{{X4~0OQ-;e)FrRH)XfVp=g1$0J%&#Nd$FqK5wBcC|76} z#5G?^lsAYsp*`B|obv<65!Vv^gv1%D!t6r#hPrHaY^3#D_XKr1FR5VqU|{#Jc|=0#Zt%!!2+nL&RxTn0q!impk<_JEW3M(9;$H?H+nLYCF2T_ zOMKfbW%j8{q6yc~R0ODD#!m|^)XAMSP{94sHNaNlBH=fco0d=$E=F&Fhwg%vKu6OO zCRSvEHkX=}DZD6@ZuQ9_hKisS3g55*30M}aV1gyzZa2Dx+h37hU~P#|`nWUtxPZNo8(0byDO-u0Ih3*`-|%H?_=gu1 zl+*w;n2suex?xJ?gGG71p~$2PTtzi$EDUvSW2!dW47v3jx62(SYh`krK=uIK4sf;pviBs>3wFWFEo$QFLbkNTZsoH1%_h2n zg_(z*#&`3aSC=v7=gz@F)^f(<(mO#;Xs*|mX9T)SvI1K``{4jKF>p_VKdPWEUZ7l} z;xF=J5hZw}#k{5?^9|lld(rY4+kyVW0+>Qr^Ar;r?uzJ2`q`nQ;K3!<_z6J$k5cM7 z0)e?{-|I6RlMVF|OCDP*I5q4&wTIY+;|;=RV40I?f8$$n;o;hn9_u*i5e#!ev0*^-4x@>x5G*(V!l8~6vC$gu zPT+f8$67^!T}L2vAqWX^EEqIpd62U28U}$0moI4ShZS!-R z;x)>($psB6EX*{dP|MTP4KbRzVaQG>+`I$`Ifj}>gu=1I?ZAmiita8{P&7m5cYHp; zC6E#ZFBTM5FSX0%YB8D=D@LUyhL9GRM<=m%K)8VjdO9stiY}!tpP<5 z>{K`~%B~-7WlfJAOB8gQTmFbTC(WFr%s(y_WI)~UfLAIL0Vr)g>3D`=UJxz-@`3>_ z!O({0c|(5<+zUcf(6IRhnuXPHg@*T`)PA*h`3o3!AGR`$5)%wfOxQ@~;oq=IMph3?eCq$N$ zY}Nwe+kgb(=GY|&8VZ^kpd`qsOcY{Jz;L!^a;;f07tO;;G)vZ|b-86m*kG!%Fx4&O zQWA#UOg@{;##_Y)uCeXM@IX@L9SyQm6B9`9ZLkUS?$&S8bsT$HNkc$*bHHk2bWzh+7%TDSB`L3YlVRREfkUGO;VysGuopA|f zrTY>rpI}>z!p@BZ=bM^9N^nu$)kVYUV0Es@er3rys)~kMmn!0D^Z67SE5RVDZPQQ2 zDUeKT?Gk~)6;!Mam{|z(lt>TKRlmm;fpqQF;$GX)yN9;l7)-prAz7Z%rE!U3*=%r> zU^lQaqaQH_Eo{3Rj4r>|3DUC5r!Q^5i#NhTW)3s$KsLJla z2se?vOS29^1}t<5X_uP<-qL6~CoEG@igJ4(iEz`bL?#_8R1)THVi5u%889iu$c2x} zSVq=4!F5>2}WF1Y9|Scl@)!#QP2S@@T!s6%2fkfs48}pz2!(0{qh^9iTZ(utQPl1TxzF2 zVnwf+keYrD)3y>dm^VZOi!Pz66hR(c9(UcG|cM{7(S}%~gc4NfK!b!*E2cA-2gc1bc zi74P1jS6?lq$j6}6^2Vm1`! z%uTX1g`o77cV%B<$mZ>fU&eqfMDALSbtld&(-(p~O2KTS?-ru%puXYQ%mGbJDSrTq zoL+-h1ICX?EV@y)<5=WuC*q1BEyA?p4}H{LsbcIN+lxBMJT7Bs963U(W7G?VM!lsA zDu(is&2Dmo7A3FvKI$T^D2Q!|x5eakGY3~>*osKB>m0#bkDW?R7}Sf7pdW`_iD9NK zy#w<&Fuh|*08LQOCAjp{WxSXo-)fd@527VaiL-5pJwK)_;g1A&)?2sz%e$Y<86J9E zML<6Z!)x6@_f+{9URVco4o*3_fG$_@1l22N5OBpk;}x0Z8$g z3)hN#m&y>*eoXwRXaTu*#{rk52#TytD+V1v3K%_Zs>>_pV?hQ63ho3eAXjl-nu`Si z(AZeCi_eTJFxRz^6~M*x*r?X=43w1s0uC^+z`=SwIie!Ws;u{)CHZQHEv|Vo1Os3h?s@3p6-OiuDSB8HDoH z1Q#}jO@dQm)81hULB+r#Mrvq|{zmJe7b;QaHA0hhh>nK^+}N##PC^U?W&Se#*=X_s z4QC@LaAN9y0mMjMXYS@#5EXb+W-jF-HqZa70WCC0FaDOrlsMPn)G+S5ueMRgr4Pw}P7md>IrHOnuy1jNXjI~rOyTqz;(1_@go9c^RD9U4Z7)u02Esr+3BD9NJ z#vll^_8?fOK9P*nZAHgn#OH1T0U(jC*4QJdz^F_mmXO%XRRibsguAIte(|)3*Kf?p+BNDLBg z7ElGM1rp+1%>pGT7YH{{~vK?hpAJa30 zKTx)3cY=hLYU2{s$$SwP*(KJ*?`rm<3!6}f#(F~p~+rRsfz{Qm$q{9LH3)LHxu zK={oW_@Xyc)BZY{C;6==UND}L!AuDTL4e=&Hx{MYyg`+iH_T;R9s>}7Zfs6?1LgA{ ze5{^Cd<+Y~Vkp06ICCszTemTY0%Tdte3=PupkeP7Z_>s<1T{O*t4LsHML^1Wg#bo^ zN;TB zjF;{=_)_wV8LQ$kvN|%<#;#KgWk(JV@DMFj9_M5o%Lb+%Hz#9kkJ-5i5=ESo)H9&L|DOLh&0Sxy$0iv5FsDSFB z{KCU3b4719u&@kk3}}xR4p0bJl@GWQBovB)iYbo;!e!OwV6H0V{7RX`s~E|RO3QT^ zqZ2>*1C6h-3_aPsEj@qd4&5)=9lqZ3N^5H|fTCOF8$d=KO`E6@s#uk2nA}kR0BDTz ztVxI)en!CLu!UTB;1>%Lh%ayhxGfH;V?M^kPb*jCFR3&IJV9T!w1VZ22&z2ZgD9Ml z7Es8>ZWl!65+)30Jzgw?6t2MFAv4w;p@e5}nt+p*B@j?$wqiL3-c1sis+L-1K580) zkMm_z4Zyn7n+1yCP+bHbt%X3B3`T8pi>6czntedlX1%a)gvT1aB&S?Ys6@y%i{T;z z$=lXq)Ri;>0;9>yTPT#54Qm~e)n<3OSl^&JfY^BxM6EqAZXQ=8_9=56{if-(>iZLr#1a4q z;PziTB`EQ{Oy9em$}U>J(+px3a`UM;N--_AAO=W%jWB(vz#h9gl(s+)W?~>JBE?WT z_7sk)TQ@VlI3+C&0sPqkaDU>OxNT{Mc>`}!r|rj|W$j+a2%=Vk$wC)+GTdj%6gY{_ z{gil3?l5}>!;MU21j{Q}#BH_;!uY#Ft6efiMI6%w z=(txA=usoM!2_lTs$8X+EE2-VL1}KdF;TO3h=4>$t*b7Ge4bRo8e-))Oc6xO0c1=G z1!g--qN)Mjt{g`K#Xd?L5m;@s8FLUVlzpME)#Ncs#xkR5 z!v*Z<4aT3EVA9bR9t!9qwcskud)=ih?tbLYZw#uIz}U3%(B7`xs!|{QT0mgy6Pnj2 znXm4hiK2nqEs#}zaYwG4O~stV<{_d5gr5rnX~zdKI%yW0ZUAk#<%uB;K>q*~d_u>r 
z5hKZ3E!`2d0WJ=Gtnaj^rz@(x`*#ZV3Y<1={uop3x{xg4LeA!0mT^`lL0}VA6wSm0 zwRI{}NtrGWOIxVuq$TKK)F{OTtUvUK0@hWdA#4;&DG&8036$-Rok~Xe8$?(~R_twx z+RI$wCB!it%)tV}v2hC)xhoXtw0Xoe0bNS76^(LY9e0J05Nat;Xq{L(waT#9F{#3+ z6Xdi-Vt;Ug0Z6qU=vdT55jgDc>Xz-%GQq1$VScYEf_fZhpc@0LPnKf<|+{VvihmkHPxn|L0E7)QNF%h3Lti*{3U>49p zpp*{?4k9cWC=q8csdD9y$r+mBWO1Ypw8a2ciF?1;WFoC^61A@}22g~vOT^iAlPn7s zj$`xj(&C2`j0u=IrIe-)NWt+e+gve^q__9HU|wcN>`cwusH^6}x{4U-E`X7R`QeQv zu;(rjp3$B2ja3N!BV|B0ipwacCyYgA!0q%QUQ;1Rzpzj_D2!H3S!u;fYPZf|1N0&c z6Z}<(2!N^N$VL|K3W!B&`Ht;>Rs+Q;x+M&?1Rjgb4C$fBvov__9f*Msy8-qFG~9)< zRImt*6_h<{gCS};mmJHvi~&Hz*s*?GFiSd-jj~w&1_0S)1~K8%4dM2Ll|8RgB{xtT zOw=x6a?{k3AP|$>*dC*VTuG6pFd6{e!#VCt4<=#b3k1fjr;7MQCKgi+pQ;C6`Lq+# z)X@<36Xd_f77Fgeg<$^x5@MO1Rn#tKL)w5#6weitI74QTT}&5T8HWQLyY5rc%WIiz z0(22ARQH@{lyty`y_b%qJx6zlCj_3u&?)Sg&;IbhuGSn&&_F_74#!#h6Q0~}s3R@o>{i2X zq;FMDs-PN`#G|4MY6K45N9idYs1_|8g@=}~bR6JiVvo;E0^0S5m{q}TRg&Emq`QNP ziLlW&uH{c;E>X>0h#`5dd8`ljQ7&XTM7%O5@ z!{g*pAi{(Q*B4w?Ok1e+g0)q5KO-@vLNAfaWrE*@H!mWY4{uDsR|^&B32WY|SqnYmS{9;zB>8Geg@%qL@i|vLBfP4Jz{0-G?DlbNW)*^m z=`EvS9PA#KA!iRTv}s+qCB}m*@-Axh!?5Z=9S9v}k~-2*Hg&Wrv8MxOrGIos4g1oj zOvvQZ)BgYsXoG0cR8RTkKC;@ne#dtZA zmD<~-6~CF*CIq&yfzdYRi(zs3m5iad)FAjr^r=cS8Vyz|E*wZDU7f`=l$DOiLRnKl z>MO2T;Hh+uXuE=r;b~V8HPAAdVYpf=24*W0d~qv$h-Wbq5rbKbmfJU>0Z#Zom??pa z@o~w_LrhN>hAOH|mL^2(V{T)ExB#D1F5tY&I)GZ$%RRj3iSw2Ni1)=BS1 ze}s902(Z_+C5VFnT^g^ErAO!IKe>b~z%3wds<0vzY|yo3;)vF60+TZ3K^0QvAgtafu(9R;-)i7GYANyr6unL^CK1(fDz1rn4B zU2lDiB)VW#ST-b!&Wt|4PB&Nn$6%|04B{EU)Y?o669~!~nL^PB+$_U7$0m~FhSg*h z!r>QSKI~)J+5HS$g1Y2kk zY9t#N4x-bU zCzwu_SVR^VY~2Ecw7U+&X-k!B#S$}LLF?TKzvZ%UoF3w$~s~E$? zK)}_@ILQ$1-8IZsSv(ltI%)t$l;B=w)n;1f*enmE#9r;Z!xVC;MxoaQNsgo|bZ_D# ze+pQkwSlt38u5nnHahVHl|IGkBDd8@Tj25g(?JLcS8c>(_?E;H6-{N+FHJN%Osyxd zwu?E`(}o3WZEONMyTCL`ShE3bb{bzxmPIze%Um-OvmU{8E`kBakhhAN6-`}Pil5Cv zDMD6>V6Ht>h>1P3msP4UF70=_m*e1u3mIAiF79BM2v2@fB=Uu!Nefk}vp=LxpskIe zGCHaQFtY1Xn0Lf>xs@Yy!$U4KRxGKeufNHVX$RU5~PNT|AelR_KR7UG35E?Eiz z%G|m9K=`lXSJl;@SXEYx;QRz^A#^3;+3q)T_**ceM{C?DHD(&rss=d_RY24Mi9oi4 z*-nLg=H+h^T*rldpn!n=(?7~~Npwiq<38)=ret5V8TtKD6T@5-7Sj7eiSCw>xuJWO zI)QIQ=xgK^?!p3bE4g>6O~&u2$jm0x67&Iq=deBKiV<9UE#zJKQJC3>0R(ivPkS zn69SU+)agRVpO*v;~XpKvatBC*(xk3~28E|~)iH#~~(;}79hIc)PzLxJOS5x3h3dE{) zYa5i5Hv`zGMO3lrl#sr&7O`WwRk?Q8A3m1!XSYs(MDBUuD^T0J)rK)sUIxAVMUVFcxno^*=kPv*YA2B8vEy zp=aA9W*Bp_Kor2(g)l{-I__6mh%hJ_B_$OGHkgWS4~8P0+aK7gYZG>dWhHT=KqCS{ zv=Z$B^P&xBT(vFG@KS`(3?oY248*txV1m)3gbqbGqt!Vb#K*e>pk}2s20LY1hV00r zpm`0WA3)HR5Dns+VL9#|CdQkt2oM0h3_;XsT|(W5lMuSDag?}#-p(au8w9&pP|jdz z38DbQTDkZIZ(T)Zy$oRLQmt`Q*;UQv2GCWQavI1SzuBJpreJCxxmdYE> zD;tntRGhiuZwbDE7i;-4)mil*cU+`7xAnAW8QU=qmIjqk+gS-jN|w+PEUFCQ1rq47 zvCiYq(a4XOkkb}c`#^}FN#$ehjY(6}fDm6dtf3XYR)kDEekXl>k%$DGg@~^$m~I<_ z#a2vQCMB)zQc*Ho_W(H8uoZ4l)iA?X{4m;8GUD!u#X;f~m1Y=&bci*$bD3)qun0^* z0B*j7sRY&4Vmm2er?HLJJfa@OKa85{9T?@^Lmf(9KXD1Fjni0<8FL14;VG+v)VJ}; zD0tVoj^XCe-PdZQ2s?-VqO=Efr~02jOO9vC1SMO@0Lqp+TY~;^2**4D+Nv1+@{>!=4teY=m95xdW^DL58;k}z6vRW4s{;Fg>XX_M@K1TW@1h3*2p;Kpa9pKf=|iQ;M7%%2yK?CL?-3jZAkaHz^G>sHTD! 
z{oJVqg-Aw~K4_sp4mAu&HQ54HmYT}ximn?sR?-Imrz?d;nOSs!Ihp<~B z^|;|I=2hxyQk$DW=3@7l;@fBRh-71s2nf60L4}bT%?Lm3Y5g6E{zI5KM69cJkxUK_ zW_pl?m-h`3twskbvehA2$|aaT*e3xJS6^lfW)(2DCE{UFL}V|j)Dp2%(=dlSxl|5x z8%j!j(m`RaX~74vR~VI!I)f;v$phE8Eu-y(#8r_<%1eUWdQBl$Z314`Ff0h1U(hKO zH>?AU!SsF-z^URg({138iWuDx^4O6^T$5(@Fk7HoW&Z%<$!_~fD2W8NO-i|~G10+% zOsUfJL4nCh1vPj=Z04e~h{trgw|Q;OSe8H)hJXTNOO-{bQe(_5X-IMiuYFliIYN}j zUqHeRsyXt7jCXpvjen9QpT{8?ds%Y)pPaJ(Kfwmjd_}qSa?kc;mkP)B!a{x(lvyQP zNBa^UjwNn0VQN5k;Rs|m?jZb&l>Y#CH(~l9h?QNkSp8Ovp^8+g%}QNqEFE0Ux|-^k zF5zg)R-lJ?mOl`yi)_rN7-e?Mv@pYxZ&;YuGXbMA!x3DtPG#l4k{auQYM?y^gqc|xx*;N0q$WxT|`LPcvhf!=?EY!Dxkjm3$} z8lp3zk65c;iFen+UvP8?S(hZSuXW^jV6+V$!)^+Br*BYNMNceug%J8r5?nwdGS;TTD{av zs^zOme6_7PC1u=AR1hvzAK!(G2AUXP1ao^ZE976IrMd*|7vFI3`56<^^2~$*Z7nBV zN&rVw0vPN;0DZJ2MEL-$zO(&J=jt;kIfRJT&O&DOvJ}SZnUsqwN$NK!Ov~M*1XkNs zRtO*gz#v2&<#ys##2yQYs>RANEIGC+&rr!w zwFYJ{GcGDqPA3?anF#vJH&X{$%u|RdDhY++(1+NwoJ0~!tUX2d8Favtt;13VU^2K5 ztfDP$da>`4GqhmkDY{sfjldf>D7s1|G?5yVV=~Q|s0YXm+@Y1~QOs(>1143^adC@f zHJMIXKwy(D*YJN4QeaFQ`UJxLyH}LhMS|CFQuz`va?RXc5uJ#yS3Xzp&mw`?YHd}A z@VIU$z0{?K-B|$yX=C`sXN;vYE3rj^!1ADAW~tM{s(l-Zy~bnpG4FPv8Me}Ns01j! z5-V6vAh?|7rPH{pO+8I%xBG~4D=0J+#WPoo!x1<&I7)1gK^4AJS>8;2{Q;@5ZOfe4 zXhOr&E?BXy2BQANgm`gVP7vI+u;z%YdjlfaQyu;$sQI-Nz2Bm1d}# z)E4=Qyh7@Y%0^;jB`+`q^mT|AUcAK-aQ7EOjmzC=?mibO-Nm?BW;ALU+UI-jYu(*Yv_96hP!EP{`VzWPvr7#LKsb9FL)oURsKz4x`u|2AmXVAeM zb5$^wY`uA|UybS-NX|@uL})j{68T8S22)rLt@It_R+{U72@l#SXfv#;FDl?sJv9mPi}u@<5yy8Y>kWbpeJq1THGKnMyO6k2f{GV!TwPJe8e8 zBuPz}hDiYM%Y>^O)UMYHzAhGsnc@`VFb&wzEnC_);^rB=rSxea%#8-gXM&^3uuME9 z6^8C&_*lT_Re);Z4|1V3YZw}W%SGnnnq)JGK&eQ%W4fw1f$gjc-hN6>XT-1UhA8tW z>Nb$UY5hi==E=Bq%uCHgVdiAYI7}7YA^5d1lrUUDU$KX>2uiB8G3f&?VQ;djk(k6D zgu?SG#J`P1^1?Y>14)XOw#MC57OiWENwWtRDqG7DSQ{gju@n@*>jYN!M->Y))yfv= zcM!7O#WeFEg8Ee7@B*ca z>y+u7^CxN((geWkz}_QP7xNSLAi9& zMbcL{fojWHog9TZ427#M)%Ax?60WKXqSKMGqPiM6R^f`FH#ZTGTPolp9P7v|Z7_c3 zQ%6?>umi&YAK^2BZ)`5M0%rj%N_mvinlKcuZj=^Ebo&sxqT5E6RK-lS`-a=Z2XWjJ zWmHp4#(pxFIh3r*w~`UO6G~jb*}r98w;LH%nM;ZH9upiF6J}9WsNIk$vEA_=dy3Ye z6gVVY+0Sz(Q+F|Bh|stXM$X@mtA2sBV_U|l4*S`*JEXadZ6%Pp4%t~)3otx~lJCqu z=J=DAxEXi34^`AKB}h2VmS@m|Z0y{O-~#2UMK@-yDzN)T4l67l*$ZJ9O9ZmG>?HoC+? 
zY5}5X#wCiHfx&Y(4YG#}pyieXXjqmmjI|m;v`J%u-X)btSM3{Qg$3XBxtFvMP<%rA z!KI`=GHT$P8hA}m_)KPRS3w(miO~G8xeZEkC?MY}pgD)I_(6tnD%e3zf~v_2$FhV2W(*6p#*89r8{2k*`FTx0}6)o=j8DA(N1 zfcXm5#lq_hF4>(%_+_pgOFqqBHwNXmyi3j?s)i*s=My*`lMLLsx~a>gvs-04%&c9{ zLf|WH7{SOq#V-f(0d9;zqcE$>L|b&7S%|8E6-=Q}N-X@4sp~PWKY@JQ-w;QTZq!2a zs24ez8;(0fZ}-G?KwypFPHh#TAje?tP>qGIuIicIX(ZEsOY0DP$LSBXPbOs=u~QFia;Ow4x(o>=8_ zNNbovd-bbz%!Rs~)(k)Ph2bFFa#vRVx|uu6>v$eVyyU7VH-Zol zJE-pn4{IO^8levPG zhzLkxqS|fk#cLSSO_P}4X=?;#t##ykS<+@KJeb~Lo=?d!A}&CTl041LQd*dsL47ez za$UrsOHQ3#LVr7930-O`rA4nXrGn-#+zAWT5ch`u>J!@#Rjwg|Nw;vyZzTcJ#5mCo zR;VRDOE6@BXhmy^g?E?>`x~;wKQUMaSgOGVbt@9w8Gt$Fddvw+u$6k^SCB5N_Zu3g zk-$_s0htBS9AaOj8>DzQD~B*^QT7A3a0O^(*tKO)!*a2VN@WchZi=XS0t<47Ov{Xu zd!`VjL_bhN*85A2%cv6iotVGxO<$) zn#8;2!$?pP3rvY!#N0igy+dZ{29QP-%U)u)ov{+L1YEQCMsI5 za_r9F$Z2$wv#UCGz?{SmxMG)0r;_E6`09a#7Fw+h5X!(Pt_Vy;$n9liS%P##tV%PR zhlz9STIMCyIOK@FXJN;(?Zm>=vsDh9zCn)YZdhBYi*kxHUB-)r@zkc94X_UR=gGlQ zXu^ZcEzMO+S~GB`teiTEI5+x*wz12mQ;KS!s=7TIgeVoVHz7a|T) zYyDzLW#?)a3hF%yKSh2|u0Na1&FR0hB>oXQ0z4DHllWPe{cq{^WK9ASvgQ} z3-LD?$^bIR=pC&G6HkOoYbRYmqZyoqJ@plv6?`ECpgDVw>|oOd2yb1~GO%hZOLWl$ z=dd5fT11nTN49PPIkpP9V_d-wY81H4))2XP(MbTcS&3yp3V6t1&8hA~Uo69PAK)-=5*# zCX9-@ z+-ww5j<(f-61iUl^TTL8oCSWL+B+F61Q9o_S8EWoLDL0WsM+Co;#_Y~dP+#m_(H0$ zJ;b6En|i5pJE?BiG9F>*x!uKY875L_f0#({ff--isLZLbv@crX--ilK5PILkLU6w9S}CROU0HAR z!DV)N5zgWyqbLmyO)N&r=Z<9~t%B-ah8guGOf@R2pJEf0OW?sS+Py(v-r<*iq8_5yHJ!LQ^>NqZ4%MZ7@-6cL7Xb(yyV`kJ<$zy$O{-VGhMb=+cBORW$U^Di z8LPd@poce!oJ(_we(Y3wN-wy$p|kuVAkm>2dzJbOMEG*;xEeT%n-2w<%HT z0oltev>DV8M%Y=HAKWH8gAgVx|qB`%`HXMI*4yjrrsrzwsytPW${-t zw4m-GR018A1TKv*Syf!)a}+Sk3vj6>OfvMFIMV=0nrxIPtfP|^qFMCJ3lUL@MM7J`yObr+*|~KSJ4U$MA5z81cuO{p7l;6ml$e;2U|Xn3LS^&K z(#!w=eP^=xYUO8U2q5!Z-)r_+kf%w{uoJmiW6Q0722@~(7XJW+gw3vwDl$CM@`)Zr zQOq3fRAoUFxf%#+J!p@!0dI5K8<<%LW^v>tbHxh>aH5(;Xt_f}GKtR&)in{50o+l; zxZ4>KD-L1Maz_ML3TK9vqxdT|?@iPZsqC){ZYHHK8)>d$Y~`?(HEQT`#Zs`{;x^FG z-RdEhVW2INS~C|US&}d%Be|<$1$WO9^pvi&!iqI~5{0z(%A$*vcS!5&0|I3xc-Vpi z0x)fdq|OIwSOJ3HUeVmT?G(alyNiW2T4G>*_EbuesHm}ug=)#LwX4WL(MoiW4=2e< z(~5u_R^35qqFD;a{Y->zw+qsiATCyxAmGFjyy^%wNOo!x^N(RmE1XrX8ABWX3h+%F zRm_<)6gnm`V2BtG%njjk%})`db%E4uDJZD6r4g0DnSwT&+b>!_Slh9b z3v!0PGn!!xB0)@2*|UzAqj88^S$exh0wA@(bp-*qc1XqyP*7qo#yYk-#5_8g#uNWd*wW%QN+ ztCr>!h$AH`msO}=7Bw|7O`J|hV3Aw#AXE!XG1qd9nG*>Ow4Xv^ra1~KK^JP8>^h7u z7r;mD&D+6XG_W%sWQ~)ejfHEt)xaP|bqbe}*%w%{=DI@V8zLy#nCW+9#obJ$4juU$ z^kk~kw?>8}6GXk@sXf;~R`s(Q|B*JWEpl0239SB@((x zS3{lDD7;nE+$^Y~P(og_GHGey@>7==aHls5tI>^`U|R_i&Q807rwG6Nu4GmOyLVPt zURL{wXG{=_Q9!?n16D^cx9ijzF8Ry}F3F$FEI|v}KGH#k+%ak7lvEX)OB|2{v?C_a z$eYECy|G&(1gc>LkgFIzjH)+PuHw~EOA1BG6(wJBj?t^x0afFs$XyADsBA=+-X%FuU?zyu5h*xu%(^3qhyu2Yw^hAdz8+mpO4B z81+L-q=k5`8KQM516qrQb};h42vGr%w`98rIm4Fhy%*zm5_T!Ef^CL5;p1Kg-072L$u zV>Zjs%QBMskI`3Y0Rf}S0VqF-U^MP8G}^&!huE$PL7|J~fN7-gY7ZjSM-r=h!cr(H zg&K6zE}Nlum^Pz(xokkGN{j~`%P>Fj3lK^>_5*~7K1YF=BjiO;ukv06$0SNgM8vzb zj0!g?Fu|L+im7Hxo`?nWY}FJ4eB3jW6DlgIC1bruA|*lkO4cQ)SeUaBu!xHZeVors zvd_AX6?xoKG)ug$2&lTE6lFw;sevM_#zZccl$B+;yuVOYnco`>CBZ36uQ6A1QbZ#( zOE@Dm0MSYq4Bn9;D*nO(2_8y`b8xZFigDa52FlOo1KvI0KyOq7bBuMs1{1k9Kaw#C z!(<;3Py#A}&uutPhGt)C?IxJU71RbuZIIkt<;nd(hREpZST&m4%)D;S2wP`F<`t^A ze&|#L4g;#BK9N_HOFfRH$#jB=cn-$55caq|hlL~glmNo6iCLLOCy-p!#8sZ^1&x0m zQ}Ybs>`Mc>GD4DOmaE1!s}5UReL;)SRvM{>Lcx^8F#`H{mPW@LIDlL<8z*q4B zrvSdC4yQZD3bKXKA=%XFyvECTz9U0L_uQ(c=&W}H0HdbrA~YzL1|>E}rD}u*EXJeTqjM-f249~VBI`o97%&qMbp&0%QKY)B_7u7W;;S(<1za%B5$^XM z6|7E5jzF!mMQMUx3KGx2hN^LKGUDP`1YrsZ4a;QI*_ak-DnGh5rJ zH=ayJjLR+v0ZJl@r%V?{V$d;erB|vNld>vT-w68+Nh++5tWeE@V|C9e0#p@nN-=KO zIEx%rF|r|c5#8=tS%4uXPsefd)CdZJGo$Mxa`TFINatjBz+wldJ+!!wq1N%ZhVx+u 
zOCG;;vxu59ps32FKJy4D4anhEg!cMjx(fw=r6y{1Be&>^>(fg82bK!uEW*Hd_t;R` zwiw1+DkNZw(ViSt6jNEfLUe`ipN$%U-VpcA=p#7yzmyEZW+C+6H9HYI44@8q55#)SsVX ziVk_GI}Y~~gEJ|TYNFMT19*?>kJlYhq>95dcuF=#mbWkB&>3?Pw=L8IDGyX0vpx&25Wx@P493dJHn)uR#D=%<9(nmK>-6JN*O>>x)3oAMypop}TtWZT>krxlN~xj19k+c0vtR7&=yXK&Dfm3RIi%Xv%X=_5$AB|wqo zQ841-8^#LI7$B;Lsgg`8s1q|CWT5j3TV~`a0Q>ABb=0DR9-?(wvP;>MeZf( zJsm?DZYy@>gKhdsvfD3Ap()~ECWs>7MP%U5a9x9K!nv_pDxquk4BIHhqbzR&a7OB? zSgjPx)D`~#-9S#vS=?KZhcf0%1r-DhuQG==x?&o-6AdO^LXd#KTD?n*0oyB7QeMFQ z_7FQPDBcbsLX>b+a5wZJg5ANjX&1^SP<$mdqb=1{M6ht=JwRiJf0$u<&7?urO{50} zopl78ruPu%h~|aV5qz5&W0n{fVAIdCzpwIJE$Rt*+Fs#F)5Ws?0AheBJfrQI@DRnVL@$=<*7=Lc%ctC0R-! znx$Yg44Uc_ZEUkArh+K(m!EUnS0%?vyqi6O(i3+96ts8b2PRzG z%vPGJv$}=_l-ES=(=EmFw1Y~CwRPU@0WEes)zor%t)z4c^@Xr_CBod$N{SejX=mmx zoOTVtm2@v-`6eJKwCIinbE^a(8KI;M6#>GJ!4mm~_(gYFhYObkY1655pPQ9@@>O9Iree}_@p zei0SCkU?7*WlW%KW!$}r8UdLhRA<-(a2We7upmWB1%x#h4a*7$4cXK}-G~E7>T@ql zj)rvM)xozWKF&_Ha>N)57r0qKyi1b~tJpiG%vFO@@DXikrZF*@EX}1^(f-6p+E$2% z4_;z!C=9*KZCpVp%Gtup*OHtX{{SLTy#1DB;dc25gt&%Pu4S&2T|1d!t8LZ697HxX z<1F)1%sL|55pQhbG*3FcY<<%=%ibW&}mgPyWgcUcY55AT^2Gh*D-f z0y8{5U0e{0i+)VA2ZR;nj+|Gp3)>gj@>DiOwtzBCsWM@R6r~9Fh=QFEFSxe@MQ~n5 z1wABMoslxP1~3z#E7}pH8yxqPw<-qp5v?y|B3TnewAo?S=34-AJAzWa<`+%4*%3BQ zCu|LbK4jn^6gjd~x!*C&P=y5*oArvVy5osSwrJTXWlENI9o}Xd&aL=LaNGh*Ucw-# z)%ySjZ*??$5kTgaz=^F4gb?f2bzv4?TVhK7xk~>4xo~E66u^Z7V4uiwDq@Z_C9MT; z2rHAPq|P`X2#AAf{7M4DvxtSqb-pHfbXQQM4LZ4F!^{x77hOXUD?1i8whDfsL~$In zjfP-_x_XG>Q2oPqZjZ5dmg}kEM=oJ@7_$YxQ;C19Ogm}PDGIl~B^k-$Y*tN>WmM^w z)-BZ3tR~os6_LNP&rQn3nK^|n+qMZ{4Kxra>pm)?@j zm!pv)SK{)p<%7#)X)+5ifSV;ioF%=P!P($`M|`DTu>%>aWtW&^cf*CixFRj-i|Jg; zVhc1V9vE*U0A`39R+rkjA7E9KXm=6!lioqL5w%u zHmrZZLjZ;3l&zDmK9W17dGTv|4<+DS=FL9A7IfcJ24P;5EOESi62U**dkRa+FvTQ%HKW2hsy9Yk9jVJ*D?z|4i!wOq=0(*FQDA{A7!)Q*|& z2LUOxq+lv4OjxME5Y)X?7{4gCk-^%H)MA5U3!s`>pybCb_JtJ_XO^o{<@P85Rm#@E z5aA_S&~Z#mysHMH7jkB$(MDmV-CD2~9E`JN6c{h5M$-Ygpe+9YvY=41Cg)v@xZE!> z3Kx0SrRRzE8|cik8xCFknbq%S5SScqT$<0I~BwNqeHm28wX`fsbo3H z0|#7@BL-TCB!HQw_@;V1FtjzJH3ot}YN|9X$8`W!8I=YQBSx-ZTXlb!;*_Q#OH^6N zHY8z&hEKAkSa3`=0A&3o(?=$4<{R>KMTOfr4ka1KgWDEqZmJ9*$l}~YQTSG+x3H#C zj3yY^EvqtRrP&GR~OTEnGez&&WH3oN~fHFn|dKu9r#K&dkW+ z7?i+<7TX+{n7vtg;|2wc=Aj!#Wk1KC40g6+p+{;OY7k3etrAk%qjh5xi5D1Zj5^M^ zi;KL{sIFPH94kbM?A_DEN$u^|6GB*KH~NG~sYlU+BEslUk0YGKjvwDyh5UgVbE`2K ze<_A4=(bJTJc7H+&N0b+=Y!zZPEKNu^o#^Dy@g{JGVZ!aMg;A?P~-ZuRwGlRrqemqY3nVm zz{Ax-U223H!21qGuK6GgorPZ$TpNZrU;{?S#z+T9GZe=q@gQ`@D2zvxWA5xk#FwC1U(Y2njl%aE^jtk^| zUzcv;)QUyiXam;;fBB>L_7*F|8R;M7`GX?oq6YVSSYnr91H{els!P1$m$4fe4WX-{{fgSen9e|zD1rA4N*HAI*o|m}P5S-Xr<_MsD z^>~FZ+NPlOTJFS$31;&0RTLOGGbp6vVGHoU5pDZ!VUp<{i@oL&h_qqx)%vJ>zryPX zv?c(u$O(TZ<;hH+*V6XIx+y2zR!xB4ZU0MV(Z?LYZ<6jC3o{i7rzLX_UF82W-w5)C z0oXgj{bYfO?9!HsVf|yUN$`JhsQEXl-?{RX_ldSEGg~4_{q2$Oo*0KoRsgO&=#Y!& zOug^S!1=*{^&5IokZ3K<1}|u;v&@9EOc}a{#CwK}|4>G2*#oUk@`hP4m5Q(}pOSTC zm-qMUo^0x}mG1qBmiFqEETgRR9-I3!+hlk`4W@sVPN@;YoO1#*EUsb#MMWYVJfuf+ zX0iktg7L`I$BR(Lg5y-=b5AZx4>xC&Y@p7);@QomQ78HEg!1Mg8*LBOcHuU0i+-4v&!+qHN?fT2Z{p9b|#2JYYg$e`r?RJlL2Y>eP z#^DB{LnJV(hPtj8s4q&6L~TC*T=+>vha|uCZ;HhNi{4e5vwpr0STxrG=9=<)rXTrC zjQB|&P&iD09g3pL!i~))8x`{g=nCvevms4p&a5RP&c8ar)f%Vl%>rexFbeA|_CgKm;DG50Io zf8i;$So>_0a~GzVh4_KQA`L!!$cZYvX?qx-t;ePw2{ko4kseHXgYl;nrFncJ>Fu#4&=!k+5H!S*pDp>F_s6St6*oHMtS^t490S%sCHN@bZY~X+*ptj+j^fqaZ-Y@DOdb#;N}} zY6Oq;Bg@)sWH|ALtY>>3)Y2s4I~t?j-4mi2?f*_ntd`_j6#m}#bB>7u0s!SUyzWb& zF(W4%NqZVFpVEh(FF8%xl=Z3qpc;^<2VVdIJ}{1;6@iU`Ah3fJgXu-JRAk zNgH{Y!}3Ocfw~x=7K${lKgzw|BjD- zPh`=-KIQ8tNA&EzboQ=)@Mh|rY=hfYp|=m-%QO30v4A`Ew=zs$F%Th3tPM4|7zvY+ z$rl^Ca%*m!`j&U$pH{BpIqmvH2vi2Hu)uGUG2^@MaO)Msl3>j!lZFj%PrVyrCTra^ 
zs}G+RKx1F~;a?ZDO6#sZ7yF`%@Bdeo*dfcH1=)A%!~qihV}#8+Sj&O7o{*U^IFQ#X^zewBzkr0>i~yg4oI^{zV}1z~Og4L}VrfT}5A-#- zKzW0TJN~tG%Z8R_afufHWOlpy)$Uwyky9USGtlv6^8|QoMgplTPj;R|C%$DWSkP>in#zeF$@^4q`;!iC!j5}<)|d?QmCpt44%$}pfpY~?xg`dRZ{=Zx z@}{9=vRrm4XoKJmhld!z*0Lnhs%8`;+{1_i=zpjyjov8e-zn|C|H((rx(iqyhQC~C zuymwsV&C!;ob8$$(z*u@3aO2CmZJWx$1?*4T@um`5n_FYz?HUwrT)S7jnFGKyjUz- zLnLLB^yL&WjWSH6_GMLPTKxD=ANbu1Fx_|J{bo(E&@E77k9JQBo}$^| zO;tYx?86=T*>%OqjEZnhfyI4|1l=_y72i+^lHa1psA|>kG}VYfk$U?{Qe&tx;aTvQ ztSVT#Fa=x%*$WOSxt!PIePgZ}PJW)6;8itkZ9O+M%CKC=pDqi6#12U`>bF7JgI>hD5p%bLm%EMYAX$|>7%b0b?CcP85S5VU-o060ZS$C8Qj!9D;7q>t(2N_)Cey_R;tG468-HD7N1OX_ zsu zyp_a)q4X~!M5Z($f))P*Xr6R1=}x;G8C`KdxIWdft1-zB%IFVn^-I+feWjD`cAY08r*8xaNsvWZ4Ld zl^f0)v$dsen%ymQftKGcfKzH8wQ6O&%1Lum*5VLrNtdI0){xq~*CL%0QfK)rLaFd< zrzNvgUhYA#M*gtZICKPlSUYsZ{kDnJxD@v@y^7k+7zMe*o&<6L_^e+7mna6`((WiwxtZ2?^oT_k30x1s&a5}TctE8#%l|~9>WY|A8Vcd1 zGAmrdT;G}U-^R`ikbYO~@kUJ;b-z*R*ez``6M5ySx_C@S_#zvr2QF{e|IHi)7+OZV zRHyJIKTq*km_B_qt-PEOTA=P7;`%*O=Lnhm3{Ce#HbfNV*6u8+^l}jX4@u?V zg)`=l(~WOGaXp=iOXjS3vfSkg>$QwEY<1sXdUdbNo4+_={$t20cAHHN+FdPapBl5$ z+dtAHZ5lWe$UA4bhs#OL=UZS@HxJ!;j|<-&lY1g&aJidJ3*ates>KShH%umVCqv3- zqztZ^F6k%gDgR!v6#F~D6caW&$(LWD*$wzuzM^EeZnUeLGA_sas6-Z!DKn)CX#LWA zB9441PQRH4-%7uzHShzRD!8i?fHS&+8f;3AMK|(?!2jM1n@ZW40=UI`Wn}QktoYKU z-k+v>|8YBhhhVe7Fb(n}b#Q?7oC^23kIx-fj6_%9*(~jW>AlJbD|xFD-GVkU zX^QOjt!u3kNgdw8E5AcV2-g!wJSf&)ed>a%Fg4%uufhvW~seOuG6h z35$vIsImK(2J+v!9iNUo{x~Ma=GYT8swc#YOSG;2WeYq|7jgWZ;x%bgxXqXHP+Yck zZ*Tk3EOFbDzpC(^xxGH!toSIbu)9vMC0Xd>%-Uf`Oj^%CY4Bs0vHSj!rSmi9p49W* zg`Y+FWI~b9ub^=Z{sY>dp~q6r#|b*{#;Y?F}M&ZULpwBo{O4!Wm%vmosqs zoAlis28JGm55{Q|&u6DxOdX`ABz5nfM;TDXHni%K*rJ9Sj2TiOj(zyM8nKUm?Z@K{ zy?f%2p7qu*z|Hev$_%T=TzlU$MF(KSc&y?Zj=&y=H0{TFXZOgC3;w;?z@+QRN*!|V zvH%Y|bcHt*u03uB?64B|-J}z6t+GB9rbvA!_A~&@`m5Xz2fOa1YR2wj-ATD9pW;M3 z&1mX19lxMV18Kc>!N4=51(NL@5>v{}$1*$p2^9JR(%PoT)ydnhU9Y=k)-cTzRX7-2 z?QF}My^VDI=GB#Kjp;L*~c&QI9eT5V=4lR(=8t9wLi{P zj*#|b=hQ}+I)}q%s`eg10{ByGr(ogNehadktTNO@_34C-x6Y2FQ|k8J8U>U>8KBS8 z9(XYoKcg9XcFMP`?pfk?7+I~j1=g*U`K)clPm93-wb3IPlyyoyc?VZ=aCERU$ZJPD zl-i*MQJta2js$YchpO*fnG~&8@m>#f1FRrockA$qNc+hFMeaSNFe%XQ=X6Y>!bUIz1 z#tatCAZq5oxQ=&05vA$2Ba#x zExi8kMyTqrf5swz1Jir@Y!9TzyhA6SzBb$bJ}0Iw?hc?bWF2I+W%=}h#T$(kI+m~5 zu#cN&h&w(m(+m)-(>qEW?~2j_K=*zrlYsAO(n50 zol^fk5Zq)na}n~fd-NKfxNue)7*obK+II6^$(#H?Uw9eI3JTe`^58EKo=rm%^UFf< z+^@m%%?AC@Hh-ZT(c+=2+5hh7>$Z^gC}Q?NMxo|0+ta?vWnS7t)p1XuowNuAeftq} z|3#!6M_XduYMU%QOVNnd80n4pD+9aS*XBH2x?u^|SKQ_Zp6gp$w!{@N{MjAuDUPfv zIW?pA{^PZcAjW0%C+pev&*rfQM(s(8Kk;O8WG;kMF1T!nAb@K0%N_m`j zZ)t#Win1_uH_8;k!M5*>P@nki(Ob9HhbP|gD`W*bxjt5Uv<^(cUeY6%Pm4zlAeng2 zj&R-jE#j>5aqY#MF!O&IsyFovhwMpB+ug`jER`n3l#`?t>W1-6XGszE+-2A5M5sDTRV^$KS>RptP6 zpGCxh^0eyCnKif^T9VjoKY~A?lPzm{!7yPB>ou?!F<_3O)~%WQXZ&`#{6wFFbB?xH z5|5BpT*JuHPSJHvHrwJ=91T|rs7%VHU-rt@^UKRnvKrKUfWu;e6s-+Lz!4=AL zk%8B#93OP&38^#)*@Cb1C zhr&XDbWo5E2~(<~xco$XTmPoZ>O)-Oa|boS?dGD~3_O6Bdq94fi;y8Sv;8UKOvVbl zyHnzM0Z^1Hu>K%XkJj4mHvr)?MUNawt;%rtE=+PNSUzW=g>@TpQy-f3S&zlXFi1kp z=b!dKRcGfyS$7Ogi4Tx9*tMlQb!Itz`pc0&xE}=mDcMRK|7;^=RTDBI<`~KFYnE8BGaEUX>W7Wez)DYY#nj4( z_r+qKDI~46EY##b)8{g1M=t$=^LDa6RsQUx)ba}|V63VtA42zQB#q~NE0elWy4~Zq z&ce3^Q>K0`oz$Yv{6~u``<#XZNpnM#xhvEz^Sg}yGpU!7_IBPPy9boMXE#_gQv!)7H%59vanC@kX2$|-gKAiv?+b@klTYdSWSNV5p4w8 zNfQ3W5*ry!H+qV>I6%$=Q(7&C+_QOEKFIeZ(RTubMw`RfhSo>BM`(>;fkd*^8;V$` zRSjE>Nv|We1(XPGZ+}OD_OLzsZUx@Hzi;P^>}R#sew}R_mer<#N9_;l6hmq4I;EN2 z@PAL7I($B9Zdkn5lGpnXP&+ywT~^2U04Vh7QL$4mR$!iznPY8XuwSVrYu-@V5K``0 z@=Ux{rhG641%L&H$s?r za9qymkl0?z8=z1Z%h)#+2Bt&o2wiF{7Ex5Q)yl4mUhK5R1*#?sTyttzX@aQbxpK`=(V`Vo`h3bskI7IcTM=Y=ascwCsdZ 
zZ0r>RaJoZQQX0bkWNM8VYWV0g<5DxOnHfBEDiswGXl9ynur)!ZzM&2>&xTp1w@%uG z$Z}5(RyjUSO?}8JCN`zRS->GFl(xP^_GM=s-QV^s&Z}r%?J~L(Z~JCP@+By zmi0^CGE?KOq)YC&j>w1I$FTKAWH_8w)%^rwlN>VZb8+wc?M^Wgd^cOgJbTsXM!Z|5 zz8fx%4sXb#FHP>K&CkRJsrA7-h`6}KfpwDb$PxH>8l`+E22Lu;u^SYG&epZ$?5e-L zCARQ0G2hME>XDT&$32-)1kLB8V~j;vv4S%i&{~S60gBC@plG=WscvyU8+-N*KPo-l zE9F1vE$M|Jv^~6H=-FPTm+Ed5rxL1us(|D51os<7kCDSx(eoRfhah9sd8X(85oSl! zl9mpkZN0Ikk#Bvs0TY(jN&_3+RZRiV%^tViAT&y0(mwdpo!g>vdW!3=f~Ay{;WY+q zW76XjhRzT%KN5?}=A>Y2mvbBrANYWFMrKgmJ7=0243a2()$`z{zn{gL)Vfjhm8;{ zV&`<88o(dx#hGP?K4_Zed5}L%yi*zYIja$gpra7~Cf=;fq&KeUkfD3EH#d(XaxV4O z8|Mz_Wa!BZ*L#C6?h#&juHIoV2#mX~OJvHxw>E{{nfAOn%GcW+5nUT>z3Q6kXX|su znx*{ipa)lr7i4Jgw4>CIAA}VIM4!LCX}9jhc?etQnBf6{NZm3$A-u!S+T<$v)51HL z$bGLodC(cJV)gkl!pi;=OoEvnv0rF}#r+t|1L!-uU)r0<^NOnD4(xoBkqMf|2NBBx z4jPwz%RhaiAa26GIQG^>L$0NpO<-(lk*3);zi6kWK1mq`0>_jF+S%9TqwBHm9 z`{}+En8u#prr>{*?6}6zoXV41w7S}Si98It4hPf6bJU!d#mK_gFE)=j%Y-@ER)Bup zmc@_URuRGj&Q%E4L~*efmO;hp!0=|#_A!6=@`PO0Mtybrr$_W*Il|kp!FH^~Sbw93 zu;I(SaZO6XL#weUK_kB0&Co`L=LJvpF#{vcB8yVM2i~^6!nx|w?Lne&zc$Aai~qFl zPYLY2e{KsAc&Ui7OAXMJl5~B$gaOg>Z+wT8gDp92{2qk8f_0ysi841#3X!O9lkJWr zd~9E^8!kcz`{e??jT4j2{|ETdGqL$eP~v$+E@o{}((onSe8u*NI6i!WkNra>xlRU#H;pB1=+FE8*&~g?+Ujq{DQd&peYs z`L{xAzy8*?Q42^e3BWgW`tS5{U20eNlxPW?G@7UTgiXzn{z+?^$QlPm(OcAZ@!&4m z&mmE)>8*>e3QI$fy2J^+pcd}jT`3MHb;x5ZtMOC>?rbme~mmSeBSpN_uP57CdfHpKwQ{&^H9$sqM6`v z88c~#QW4|*r-W5tD8itwEVJ=neQYz`k&f&!{c||8$mOSik1>Rj8gg165zX0wrcge>m@c?hAkziGRo{}uXv>wDV?qRmlwTn zWL22Rca3&b;&GIZbgn{AlmypmQ*edKHDnDnnNP(RO*C!fl<^d!5gG`!4UDU|Et z?Q@14nNkc4n_^?QZccWJMh2(+gKFJ%luH5ac2etVLM{*0BN-1T`ZVVi-@c=(g!*Jv zBhZzjT`ZS%qdp&AJj5xP8JPm)sQmI+C>{czSnX2R`~7B(Wjwz6`q?lL#0P8KLax3a zf*J^n7TniuZWLx>%XI2Wm9LH2^AzMh?+VoDCT6Z-v;e+a3KO++kWA4Ok)=&dO{4G=6psjY=zm>eez#YY@t6cnxErk)=k7wyGMv0BN( zuT|u!wmXVaw9j%cR?uVAvm8WL1*QMGDJ!kx1MdpZ3xH1UPaF@00yAF2%;YR?aLuxH zL~r|W=xApKRvwOyBXl86zh$xN#=drjC-K|v5L@93t1Jt9=_6$k4GCVS%h6FBO+JJ< z49ouv(Z}sf!RnR9aIU^hNbRwOu*v8&m#i_o+&7Z7xb0(859gLIVo`r+CD|(?Qp?Js z*ij|fu;AWed>S+EjcERBRz_S+H+w$b^L^HRvHzJonZj1owa>n)RH&iQAc%(#6b*9I z%x!VshT7Bm2GWUrqq;AO%owcZ`dZy9M@#1;fMye4!)n-Mnzjeb${$FWXbYtjZ%-=M z7`>QEInSBfr^jcgmDoDh^qnKtx2mzfWfog-Ke)nevgr@>pezB~6s11<|0G8F9$J}u zSGHlgk`X^-tw5mpyd2ZSBQ=<7@2GLC7spxpz->_uwi&K@OkDu@{K>_sjDaq1sLV5* z8>0SZcrC%d&Gjdsgjmhc0KZ*hoX}}x3rTbs$ZU`|rko2-=KPDdn{8h{KsW_xmmUqc z9&48D*bYU|O#z%Xr~0ikTk8VuHX7oe^ER3J7Qp(aoOnRK#Xf>_;q2}VB%fcY zh>(H}t?b~R+9^(zxVUM4W#3gF(>1{V00e9*(CMh^Js;4i-XR>!M&+One8DMK_&joD z%)jG7G@YTcmMFQ+f2Fy0PvW9&nDfx@;1>`{e9lGVro>wAJuf1ESJDu=`0m;iRlk=p z2A-{Uu~XSFyS%rA-aL?)ixR$JN+(nFQ2^OzJ5pA?w_=g~SXg(@OD&$rs2`0%n5Zil z9Oj7p53mGXTnIw;)~=m$I>EZnIoYntpU2DOzPq+Q!2M?6J(158dB*KzvHAW4i5x3Q zLdz6K3-W0W4)F6z+GX$gNl*o-(^2 zB~ovTwj_sW_Y_R*ehEJxV^?Ev*&M$r$bePImA4<@N4p`5mZgE3gGW&_EuY-aY=S~~ zA}t8Hv;m@j;oGZ09R?5D^@(=8mL7rou4n$&`c<(A+HtsF z?cbyszGCaBAzl?cqqU^G;iq-HJ%?apDL@6n7uP~8J}+~nO)h0x>rDU4UB%8 zj`@qqUa3zf^dW5~pIWOrTL--4TA8& zs?EZLD=dVr_o0**nxBK~Orfh7dS6kzxQ(0^ zWRwatfw22c@&c@#M5<2d_Sz5w9NUnU_fgRU*Xwl2-yl37qrCpCQ$ zvQ&-VP%io#8Xmmc)Cg%T5*mgF7XN$UG&8!(2QhQn5=$<}SJX7+mvFtr-F_S@+w)&} z^8{N*g8g3_P3Upmgy{<2Kt@pqm;8Eg)NFk(hs(EdHoK-8Ef4XOur)4p5-y(nz6-{? 
[... remainder of the preceding GIT binary patch data omitted ...]

literal 0
HcmV?d00001

From 2a7a5506bf52ae83c7b889d8098894b9d588dbfa Mon Sep 17 00:00:00 2001
From: Bob
Date: Wed, 17 Apr 2024 20:26:28 -0400
Subject: [PATCH 303/329] update

---
 openai/physics_problem.jpeg | Bin 0 -> 25368 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 openai/physics_problem.jpeg

diff --git a/openai/physics_problem.jpeg b/openai/physics_problem.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..3f6d3200dd7c3ee1c32adde69beb4684a2a1c8bb
GIT binary patch
literal 25368
[... base85-encoded binary data for openai/physics_problem.jpeg (25368 bytes) omitted ...]

literal 0
HcmV?d00001

From cec6399fd9818b5ce18ea01167749f86d8908bc0 Mon Sep 17 00:00:00 2001
From: Bob
Date: Wed, 17 Apr 2024 20:56:26 -0400
Subject: [PATCH 304/329] update

---
 openai/handwriting.jpg | Bin 0 -> 53358 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 openai/handwriting.jpg

diff --git a/openai/handwriting.jpg b/openai/handwriting.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2e92adf4e526826e8e09025d3a256edc6d733b0e
GIT binary patch
literal 53358
[... base85-encoded binary data for openai/handwriting.jpg (53358 bytes) omitted ...]
zdaYu5KNdo)AerdmZ6d$yMex!tfgtI|)z0zMN+bY$r#Ev6F&p0|IBgq_tA(&gZJ4Y~ zH;Lv$oAcy}j1IU}sp*%SkbLM?IVF4_95*;QdEA&_BV?g&YP7zY8dmf_iq1N!>9>u; zBcwx6q(ezb0qId9Eg;%#7LH46me|V>;~tPU0_SMAN+*1oC|t=7toaWR)Ph`)lW%0~-@2O*N$~I( z`4_jok+SE3Z-E=^`05MaB+F-9b*ERR1jD*(Z|Fk02ksZF2JRidVhZCvfS%sBF|YG3 zjV=qsqgr8`Kj(6~dc>zd*kVf;_%pQjJvT?hj>C*5YDoT%l^$#14=9u77X>6LI9)L)`$~3uJN=M$6LKo>*!l9R+Y(9&H_P@mfoJI zj9FD*bbmB+zkqJ^^}JXA;H2s{)GyWJL+il-^D|ip2b^*!;l+-|z@yWxEi%}&Q|(LD zai&j(!*g$AQ*r2CaZ8(@N}2&I(n6Lc!@_g3gZabtJSao2tTQ+r7@upGO=N*N;DU?w z_gn}&UVVvqVm*KRKR_a`RMWo}ME%KtdHvl$4dGB8KHCP=v({nN8YHwhI8i?1Geplu za*F4gso7{F(+|Q9UoZx5DIz5hR*Lh1YOv>UCvo+Xij!sWv~$p4GXyWXmxW1ovYsPR z=wCl-fRj^?zFx8roj?+a?G)Uu!8{o<<6o$`?h+XDo$>a5GxXBJzrk1XE47f-0D7f5 z6L;;sgyITutuXBPV%z*frEHHx%_zz$ZcK?<& zr|-C&>{oWYbzvBo;#NQEyz}i2-nDC_`_^E_pQA$_3*JIQkaHTdgDxqnB6jlQF_KU@ z93~ZxaJjk-Uj_=|)KwtE=4ef&RU6x5-pppQf*@XVs%@DcnFQH4)?Do(irb z_=U)X2faXgq_<1|@%YSx+~5_epfE+kM(J$#SPpZ}Mnk>n&YwG8Z6NULc08t=?2PDH zo)OCQFGB9v&NokjE$V~QFFf(2cXj|2w7RAb()3Mzsyq4`Q z+sY&9eu?%kXgE}?1WL@!E5+$|WoWi5skSS7i%q5gCf9r!cc?V~5?uSR>_n-LV26mK zHjq(>j@xjgH&hz?xV^ZU?QZB!MBK}t=A8l0nEX3n3ToQaAP44VezcBe!0j(S5e4eB z&OQl(!_R1fHWQvLfc9Jsg|N8eSYGY3O1rDLY%K5HRkMF6-V*vV(>7U5!41Y`3Gf{| z#q~6#acL`dT7q~Nm<u-SxHzd}-?uB^A@U1N-;@vc0nZ?R}9({RORCGlC zPZTCj;+o&1+P=|mztb~0n=!!*8H2=0J256OaKmS!Zb&gvqRJtSBtP%di-HLJl2;8} zS00-CNVyJ}NZVhgF6h&}yL888nCZSmy2+lc_J|8Hk^~YUd3I>Is#xQoiV0}yLl@@~ zRI@^8u(r+hEp?7%G|sb?4`NIn?nCA)Be%6k*KNiKy(c2%7=$gS>S)<>u$jlnjWv-; zm6J0K)Js>HG9t9h&06H$X12%>h0eW_A<>G5Snt;H<|5A>T2+uRHmQf$Gwufcocle4^lC zJAGL{U((VO9diCw8GA`{G970ZC{QUz+fqbF*2~qVF5qr1yC&H*k>)@yPk{BcufGhC zBAA$$!!aFLN$e;|(=~%%ht#`w%YcrYVN?u3%nf8N8=a`*KGlV>^7eDk{q~F?+*fW* zku{-Pp2&2KKz)Jog#~jz7Cmt}Q7)bNKHIM=w=fsa%@ZINPbO`>IO#d>7ah(DjE1dPjKTibPc$-b|i_EToJng*JpCuMOxq>Vk~ z$Z;{LEogyjA<|ckTUTd1e5TWTTbRNip*P|ze);ezqRbF1J+SHpH?_k(``}hwd&|)8Lv%%uSeY-%iz<|j!q`n}R^Tg`8=Vny=@UJ6 zihV3V>39b~o;$ibg)2w(zFN985O@BeTs@QeYR|^oyBANJaU|QCVA_4(=fiFEgR6d5 z{p@MUg14c3S|^!=2J={hB?;j!9Z|p&#g!w97_nGAcxX;`b6CS-sYm%-J^#6%qL=Mj z3f_JvL?7NVytT@;a|pj1UO5Fa?>tNx94r*hZB4iT3KOgslqM_P!*vm1_yzDhl_m;) zyMgYMXADa}KX>-Zk`~vf+mK>1JT3bCCTpCVpu+-Hb(!t<%`jjsA4d?ncIj-3rc{mPDWifkIo1 z)+HYY2Q{q~emt)X0f1rgDg$lyej-JKAJkyd37aB|JlCo`w4a*MnPQUskE}Iz>oaZ` z!+|4m^XTU6Pm1gFl829AMJ>2UT%a<1-~3Dje)cO{iP=6sT}!w^-#P3hYJ?&8`e4ci z?-->1+1L5Dhjcj8TDY5H$vd>%(cPGmO)r6W4~tPO!f%@~3qWwL8*I)i{Oo7oPcasT zg!mw*OU6M4)>(EmXuOjhE~agWI4`S|lPZADFSdeI{2UCltX{EIER<*2}9XoOHinB{$yQov8|9ic1o+&$uhq--n{ziFY)wQ=ODUs z-B<|y{Z*2QRpRv!?WN$@QD`Re;8q6F6BDRd0xs%nxtypxVtBd4KL0+)%<$8txluVe zm*8*HA*{+>J4v+wk--Gdp%BN}PKq6Tz3~&%T!V8yMh7P+j#RHX1ro(y;}P5h@mH%2 zIn)6PnKo}7+D?){uPuZFn)UZa7H&|SJuCfQfO3}IwhPg?+jXE!dDE661b!(&lJ@!c#CLum`0F67y{T@n{W3u6mEB8`C;IJ;OS$8UbE~&wM0&{CTq#A;pNK*qNdnFnco(F+Q#G1Xg)O&#jwS zDjR(;G-l1-{jRP^HOzGoq`EWn?LPp>b(Iv&KaAm7>3j>Hg9dRabu|2%gI;Jqt~X55 zQxPXKah}}$N zOMBAUi|u((EqHnG^FXlyPuGCSjIUeAx6dOCP%>E$YcE}F3+vb@hlRAo*+wH}mYHhc zYv>sxI$Y>=L1oXamRN>qqIg9=u0a$p9u`lKSKR(CTG z1T#O)NfS$vfuYNN3<3^2SB)!8V!U&&GP%5BT2&F|P z+f|!pRcO7u^jh)R8N+D!iW%=)dSc$tolqXAyYR$11MM=JF+mWn$=be2VTq*z{iy#~ zN3p`8XY0dT?Vfq<8u}mLMN<+z*lZuBV_)1p=UVHv+}^;lW=iYJJQW>2>hc(RG~{sl zWgGw3&~;S{jX1-cY8xQu$g=>Um-k)E%*X zfg7j|R}2rPd6_6F>Ds-B&C4QXGDb}*CKt|}oSmXPVpO8Ku7@+{<8 zB{ZN=J>_BN%^I7^=C%t1TBdRExcrL0^RYXuARDDf4ut`+;u&%R7A2x#>V{$AV?5dJ zV;7;r{tIMVCaSMn;xSs-U2%IX#IR15Iy<1#HNHV5f5BKPKB_Y>OdgZC((KO`Rn=kH z@azDW855lkWazMoUrp-;I_pA?;zE1?*~0qs>kpSNN~d}VRGF~bLs9HN1|;ND7|-DM zD2(3m?N@+1sR6cbn>ijYSK_Bxcd%<;`Y>L`z`q4u99?RYnyPnys+R&u2d_ED;GWC2l7=N8`;I6loyhk5?){l9-FU0DGi$&)JZ36}D^bA~3 zY#5p$8$Q-8*v?yD@mRNpve^I3PALxa6J_y&c69hzwMOv1b~}WHz4J^&`Oj~@PrV?5 
zCfcp<*9)>VHZTd~uNPsof0x8K#r?8cwQ9`G!#-MIaDX90ebWn*x~cs$T(n0)Ex)P^ zqs#VidwVs}R@oGbrSY59ppT4WZJ{1h&Ob~McdLf^=$2OaJ=r_rP8*EZU4HbA^LFe^ zwP-oE7{HFv+$erDG)MWN8VgfFo0Eh@s_WQQJAY1*0vhrRDKb<_fl$w|DyPa(398*- zBfuEzvv7}h6Mtd;lL60_3T-gIfkh!js?mdhw?&}3_H@6qbf^RLBY)u#1OHx?!pimN zi2tK)ij$vDJ^AP(uqRufj){S^;KKaJR^6)x7pXU91`T+7as6A?ttLBz&HfRCEZtzb zz!jFD&jGf*Pg;YGdr`jlvU*M45f6j)W2wW(cUFhv8K2*Lt$ z;ZIF$T2CTA@+I9@#&n@OppxDV$t`VHn)K{XWiFnr537GY>L&9#st((-S`OQJ^T%WL zL+t3maCljm?}vEWinSSrM<+{S#t#OAY}0Rk(ROQQ2c-TDpE9%8#3EBkRv5b?+VzkK z*6AvS1jO`7MSTr?jeL!+j{3%S8Y4(Y_^d*zkQrJI-)`Y&m)wP)+r`!Qyy*mIqx zJBhb9UTljDaK+@3Hiv-BH6%AnM0Hw4C@)VH@#BRt=mEpef(&8whK&&X&8-H&tYQSu zAD4e2B>0dYH>8vfJ#@j$_T7Z=b@V73Z!5gkvU35FeHP}hZNBl5Zmh_cE=lQ; zQ#B$RdS)O^_699gxxI!7acOVW3P`ENn=M zp=F_#nT67F+`xh~yNGC;i0EON@qy=PczK7Xw7q;nstveWUBh>ZEUPa7AJB^u`(aJdWl)a{h#AuF@BZ2%0w!nX}dL>Q3%|93;lto7Yjnh z-XXB|R-0x$hU$o6jM4q}cZ(%&i~n|IGaMq!SEO$(;3M(fj5Fv>S2S%&lu=i@`RR#> zJ?NJO+xvCpEw;($_`ux*p;NM_vG(QgYU#ReD&*XS-0Yh%WBGbRQ;Gn;g0yJ!+leQ- zgG1NQC#rC?lQ2+nXLo(d)i(EwI|_6~7tGcy#eoc4mVxu^wxq_nyEm`U<+M70rxk%j zdHwb2Y{jK#buIN#yrT%$h*u(+wk?zqC^=u>%H}!;Arz(d^CK=w%tt82A^gTf->DF3)%r(=bUbU)Is%JI;}3aCJPR z_zNe+4W904$drX%5P}B=^h5-~irV(MK_x80%f>D|8Jw~fuRe#1J#_DPN1{nVTiD1C zXa{IJZ~7Dc1CXb@q*$;{FIJJTCvuc0JUdTA<;uxaSWOOg5$|k7ZQZ^|!XTtSn`RTGFsHBHn)VXpP|HH@ee?|r;8j(7Fq<~s!B7{p zeMYG}XWg)fk-K^R_DiN%*>|oX4r`l}g$ii5B-|DCV*b_{Zo1}#&b;|b9!Y>8HVf>A zHAQ9=Z}{6!2}p_Wl4^nsYf&n2-qh-Wd^LaQz9BH}Ep^JfLAXI^BgqOxPG*9ZE|)6a zIu|@+-rvTooSNucn0T` zY&+k#NfKsENXW8AgKwUFsVvcL%g&OgZMxIeqD;Io_LSG~&I!G!Y}>LS?GMq(`MgG8i1>wu*G7v5VhkO2eA&faGqHpg5_ z40Z%Y%jX22V}A7y)6g`2BgBoJj}MptcwmgtF+6fIYk?d`CKyJ9s=yK|OV|J2Z|JarQtgy-D{d}Sxq)@!@$U63DAakLyfw>aPmQXw1-^hPirDB%2%(+~LqTRZp z<-ECgIwmG*O5e}zEGsFu*kc{5m--!elG3{ggQvw?2124YMUWA-;K&OtR|-e6E#tv? z<>n;}25d(W(6I6$*DBB*-S%bx)<^Py)u$Y%|IE-&{#GMPR`RIlsxX0=`N8J8*r>5q zakYdQG+S_px}urm^{HV;@!fP!qj3kRn#nx6GT1w#+%mVi%xmvI6hqpCf%7|%{v>SV z8;Oqp0N5t$8zD~8Swg#aS~Kqp6c)A4&-%Bwi7>M7DuQ1Ja?~L@ms+m2wvLwX9eY+> zPisGU%0Lp13u5#(LbJo?+Cwn=?8=6$v=MU zZ0`~+jm*2ZMkABvB*D*;o*{FuhH18Og-u|3;@~3lB`nv#Mh$q_l9K_xyMOg>;i2=^89J%o{GL5dq7BG8y zX1ACrpP2s06e`-PEj;?QAW)T}{Q2*+7))xhJ{fPBE?0Rsb?kmoe44+he?;`2W~8^H z^T6Ey*!zp1c%n0*MAP5Sy6Pp7f&|No(oHlzS;9SU$KJX4sAUP6kUX+4lWB+ZVuKB z+*%8gA6hd7W=C3L)`G>P(yk~YQKqHfGMLcauz3RIZM#V@)wd&m`G|=yP)Vz9isCAv z?S~Ew78|EJc;3(&X{Y%9k3nv1k$1qZ{F40QcCr;OX?#uIY*ZWr^V>DQn~nqH=AJ z0TZ3U0Dm_)5h%@*Dm-e^6AQkQZB3Yz>7#;)o#+Y?+ei@}cE9TETFlajywZ!DBNSct zduv;lGt{@yF4v?yc44jv&O_$_!cm%#BL?Q>GvLynjm=f2s@;oJ`-zHenCrxi(AvZ{ zgmA0xmR?S+=@(h}*Du(Zo}(<@eZZT-9D9x%%7zTnnHY3yfMX@uYg^5bCB$I9?_}-!r-X+PgQnH26?SI;8ZfgaRuq5oT%tpKXgU^_b-25nJiK- zwo`9dtBmVB@C%$)9~ptBTiEq&VWPZu4r|YLGfu7a8#~N7I&-b#CrQ*_k~jt2rae}5 zitTJgiI1S!zp89?RqG!?@>7`^%`N0PHOym)1o1HH3-ixt+YEZ$@*jY~$jA_!p&Gm6 z%qz8wL#Uu-TK#R0AVs(in?bUb0#bzHMF#CiQUE3M?Qhf3*GNCE=bSQ6x3WApESc(F zCDrjI-bHrf<~ZoTkoi|e{9tkiDbgOqHxZAc-&mgfeP3F4;ToRTkjCo}@}tey zbD*Xg8iRx8t_0iA`A*QE?}L`Dc}SftqEEK3cc$JIkq@aAf3&H~C3w)+ECDMH7 z+LWyg&;`CJ{vyNh6=VL74;ohvSxC*#MDP?`xl(KDxY%-+V5$>5=LG)zA)eIyCcrIdG!9XXt42M2x%vIE`4`< z>QFHM5(7+c!Merb)(7NKs+mEJS49({18Wl#`hy}1Fvmv;Ed_I-oEAQcmddBd zVP~#`kS%wU1OkxS3|UZm4S7mo_YLC}oQ@RZ=o-1Ox;8`l$UlAhqA(vPKmD?x7j=$2 zH=?c)(hYjin}E6$HP=BJ3qR_@1itZi%~uA))h46H_@N7oHDKvJ$co4S0|BxO)+Kdd z+_umu1oBAoxFh+Kfi_Pxl8?tn6n;7fm8Aqbqe}#yU{6^cLZQwtQJ*V?$p!cmv%QYy zaXPc~kk8;9gGSZ&iDkBX5Z0@*L@^en{h-QU)_R0@I+*<7TkpB$(+aQoQi8}1^01{? 
zkQy5BHoV9M~cTukRgViqcVf1)97$3#m@(1{Ar;q+AuEZ~8%*l54cP<#yFW}$5xCV01 zY-o4iq7)-o+NeHA#HcRuc;yFQ+FS0Qw_0voBm0l(PnflNyvd-QDpfF><@q5k*xCB| zS_?@Ku{Y-rtEOo)$0TD05PSj}83WJTNm&n6=+IegBJ9XQzMlxH;dT&v_GXO{E7c^b z4??#RD37dw1Uex07L16}XLCf<=3Q9~N@AV|BmOc32lFRslz8Q`!w-6;SMHs}ydd~f zm5xSX%Kp&B-AwQqkI%n`2Bov6=k>Xp8z&cZ-xq0*wt1x@SIZSk{EgKa#UmaBj(KghrkmohCb)s5w zyj3h$8c+FWqr}dhsQl=(J+oHg1)&S1$>?u?RYC|(QtxQ)YG@+PNVhk4Dj@t*FE$iJ zkC4zor^#D-+oO_DnzuhsuaOhtL+*T8uO-R(d-gs2u>R0Y*CDmxtul(~s41Z$#c8A3E`$L3v@@LPwdMd6$~Wd|N>!O^*2m>I z$Uf6S32wG(ByN!dDGI<+jNf_)b68Auem2n?T1lkZ;yvy!QCm_Sgz=APoBL>Nhme>L zu{IZBX}l~Vrb`(;^#2clZz++x==l+8x}76FY$`=d5jOhug8UWU0~k>2=7Gn2Z^RZF$m{A=&QsVb`Rmqgt$F)=HH7n@{_Q3*1e)M;(rIdIpYzPR!=`D$i^|EJ z^(vt`mA&cH3jD}hNU9XGnG-sn#86X!=43Tzslq2)Y|i<3zv|^Y_xda|Vup7Eam~98 zfxpaLJ6Ybpzuo)$Tf|pjvRva%H@h5#`z7}91`}fgZ z^RM4ZoCs81n&y0~9}P7ni$!(y-z%LC6B7{$Rs7 zwP|xIUrFeVuj$m3CHsCl$|$z>RZ*(TX)aI_;WIGMgBQrZ!x%NI7*MNPvZ_cVi+J`6 zoWbYL<|h0dEU961@2i9K-pSGb1Y$S^einAK`UQLYU3M*eShkOz7_5-`z>0bep9O;~ z+Q^jFa*U4EQi47R>;?!cmY6tdIbN=d?KLH3165&Mc|_(%Ql!X$fXAW=Mf&yC<{Bp7 z=0qsG^p;iD6z23yGXnkV;zl-2G0PxLo*3cQB5qj^(KQE)5a!Flxv88>58Uwr0Cqgl z@~jc%KnO0_S6z+qW3!WlJeuR}uKcDdPamV(>U>40Culo~=s43W)3%5%ikz=#{q`J0~Ib&+GjUK!{|#4bUJTVp+PEN+>t2nJQBDf~0YJhNdV2 zGjB~@x6xxntAUCgkisA>hkG3}sJ2yXuWw^{>%`2M?-T-0JtG^v6*S>yeWqHae80-@PEC0KlbPna zv7DCX-1x%yE*?bN>P)H`y3}pD*kgNvKW3MN<0$gdIXE#7%Ovhl)b#lR+WqjXx>s zi`(5MQ>0;WhU-t@h;FK_&$+GziGX|ZW_^kE`Cro;DIcUMx)I&Z7h(Ky?)^`cT(SLK z{m~zjB0usmB7?tMf*|Ju>Jdj6Q1wAQP`(~f(7X^x=w3KgIKstjKJl~o>Z`6dhrnNq zR^0gIHGjTujXk7CpOGP-u}ytCT~kw#M&#J^Ba* z#<)9of;AJ0;=Le>@Rl>GKr5)RDdh+FbRmfSJM*RntwJSj0iDIQ*pxrzx9Uz27!R{L zA~J(qsOzy&|6O{R2q!D`uu2M^^xJ#=0M_ivz{b`e7=NC9#{FXzJ3}VAamcJ-!)7P% zj1B5)d-68jC>S;ts|=fVrdwADu9WVt7<1wm6(Ro*utoSpF&1#_K%W5K*B5hf*N)U@ zR2ht&*3R1+L+^|EkUIV2uK$qkPnSvX66;sIIb}>uRvV$;F{T|2-ye;RuP37`Pw2hb z?@uKy>-_f|=(oyoRa0Vza}mqgI>)x=tfycWG{a0E#qscd>k7gC2=CcZ8IeV6)R&)l z4Z!o7aN=ueQkRqoJq=!^k{=OT<;T`CyU*hf3978Z$F}u%b^m`!VL(1jKgc0%xZBM` zy{C-0oZ?IszObGg>l0NTk!)>FP;nTx2ru>a5x9joHAza%7|b8cyJ}iTf>+B z(Ozpb@wVfKm!Uskq+X6lN1=onE}f!beq~v|STh7Tu!9z4WZwt@*d$MWz$u5vfZ8zg zXqt$}z3O0c_z(C>$+^tnpVuPKlDu~T{s)*1()K+u?OOv~#9ppZ_6^8}!7100!|&P8 zllZ?&4(uLGLMdgg&x-%+VCKws-^ZlF zT0_*Xji67ypeF{fz6Jts{@j&Ov#0*Is*PK1Qb2fWN74p_oiQOWqQ%;Q_ZPd&%4buCRlpau)95^=cx3ek=rs~%L0z`^Syyh(7l(8fs` zcfDf^Vci=npZR*JpqSDMp@O_-skZUzJ(3U91i0H6;i2FC4(Pj_`hwYl|741wW4W|P zPecoiK_5;{R+P&(Hf71)ts0NK8CI5@nI@6MsI6ay_C&b`zng&TF_r0u%Qr7=p>M9U zduas#)L&fw{Z7H#H1tz8NUs+l9g){C5*_%KGLFw^U*ZAea>0i_GajGUC9K7!lZ-0C zvY(R>=s@&8r)l^ctlYLN&CAp8Raefo$Gnmq|GD=4-%*Pz%Pr&`QWi0RE|-p|NoXzr z2|f1}i_F!myi(pOQ@|`cdL^v_ZH&8#1KB-c|A2B2L}2~?5Mpnx;;bPN;b6k2KUo)p za}x4yJCW-df1iv2IB0+Nc{j};FISAG#e3gy6bUd3+)2Z80ez-E4-%eG$k=C}^tuu- zPQg)k^7jf_^7J&``y=zZPz*@ouENOPQ`QD7|nDFv=S@B8G={gw6g)*{>9VBw*?AKV88cj%*xl?qy3oobZ>*%m2 zx7J~you%l}UYvBJ+Ge?P=L>jbfV*dn8c)G8I^pPBw!)&??l7BY@pF*BmsO5pT28Mr zN1@S(FmvQ@{2s;&4t6ny{07{m>5ns8#4Ss z$FZ9RPA9I>$EBJ?bIb{Pe|QrRy+o+&%4=%%VarV{u;DD`U4h?DL;xfDN=6o_{KpzT zha~(wcjhg2^RcMuqK+j2L_BmJsZT4K3_7<$RRhA&1)>#JsQAQMT-E=xo*r>P_KieoX}{^}mG> zAz|?4!PSsH=@%!P(umT6%JntFabT5hoLV#aXa^<#SLz>vL@rj@jQ+50;ovzM-)~an zj%F0dw?y{|`&q^R6fQ+$|E?A)P|!|_>UsSAjG3L=2CaOV;*&N-p%wK_+L3Qn5PdF6 zYyv6n@P&QJ>))hPZmjwVxxr&fKhW2T_ZctgH3%KB3U6-#p&Xzf?d^k@CuVSLKW)fM zqt)}(VUd)3orpIdhITFZfA`WlzCOZ|}N$3{Fuhf>V?Y7`FT$phjm?pzeKL_MR~{wWMG? 
z0rFw!J}=%&dPPG$gbexH!A!7e9^{e^FO(Sb$ujx)2jdD~a4(yWdIlRyx6`2pWrh|P zbLGY4k)y1f3Xa^?2-{MIC#%NF86Tv>5S9k`7X)N z5&y%xqC&a)^qM@4KymmYdKnVPYPVCP^`s`VI-IY@XIl0>|Hj7TjA(_OF4c5GF~&`d zC9wMx7mX8`>}hNnu@5E&3s3=pV!KL_8Llx z-Wj#u#RQ9*7_T79`Zc`2xW8T_^3C8Z8cX3AJ@6Ed$4Hc%vf&iSYOBoFs+)Y8qnr=a zL10V0XvuqGO{f^kKiX!zS%%cB#)T#pzQmF8DDso^gLIv7*VNQ3Elt)XGBMM(e*tJF zm1Fk24JHK_4d%h~kX*wVnZTKy69sXpJfSsi4->PkB2{;vN(B-s0XjGif9#{$tk9!~ zep~na`=d`Yr7F#r_0LDu|H$GV_1)`O((S0pGsUl^?{yrVdR2)vnQCc1>uN-UbQL~S z-bgCmK!g`U9JWG@aAJgeM50fpx;k*5|67T-ZuS}^42d39>cY# z$u0VB`T=+>=?n1HppRaAVw;~|m8m>;S)f{wpehZ8v=`p^S6n*({qD);o`#;5q|Xh> zygUc8ELv}g;>%ivF7?kSVd%jHE{f_N6cy#{9TZgbs@*4@DWaH<37-M3+5A-{H`o%_ zjB6^G9%cJj8i*^|)87*(O^OEMXhlwH*aQ@6(5#vEm-wbq1BPca2xI^Z{k2{{G6X&x zsLEYajYp=weT~`4xRWs$!ibzP>zEHT-(LFES99+AfOsCS@yw{7icA`$!c5^qE z7@O8de-{qhc&uZ8#-Dhx21t#CrLlV|&8TLEQD~J=UeLME7+vuZp`4NCFg`{{Y2rZs zDtYfJ2hD$}gJm2avPBi>1Ya||9Z*wT>JMwgNr@q297@5eeUz{PBfgl6#4MaDHu7We zOD+u-o9>UFo>%@Io578pp94wT zFnoc?z0-i_eJjn=m@oPL^kRib+Le5qrspbJ9%(4>=PnL-8YN z9rfkg=52%gKNvZEaiQ@FWP@olu$aAen+iAW^nH5#lsqz3F^ z{Q0RkGDV_5&n-lC4EMJb%4xKZiw0#2cWLo-*mj!rEpePR1{teqF=J2v{z{aUL`AV@ z&d}qadPb<+G518K;0m$eBHW4~bxEB_i+w-(M$ztU6rgoW?*F3_O;!)9Yqt~utsH8I zR#?WqF*K9~dZOppZjS|>PVkc*;jEn;G?^E{t{FDH2#b+hTiaA0ry6}H<0eAiizdn4V*}Vz|;Sn#S5Es$PG6&e^ww z@-@^o4os3Fo5V`t8;Mob7}zPRELU8CS;3#`#DD+~b$|UCao@8%xU>n{OOG`ma(UcB zV=}xd=S29aUi_=_hfaph%V!@eRE*@EZFtP0mY&Hl<@$}VFOZ=U{OrQsG%spIHxWjx z@O=N^xJLk<6goCu=!tKpxmL+x&vLI>qhp+OBqOexNgtfXf$}>u(eB6kMv7r9 z%?LvF~VpkiqYe|VB zaVPsx&7+$L-Ua#ADQUMeC6ttGM_#@zUdNgR0j8RoWGJ*z-hH+5BKL9`#aZ$gDZ)x zb10R5RCYBQ*b;bz7f7qDr6|JK8rXK3Zdhng^S^R;txA$oIyK?A3W$s|tP+&MBDU>_ z>|8{<5e#E5MNy))@383avN@tk>;II-{wb#+vlsJH%az7P?;2Yoa9>P*bnLYP)Shkd z5BU#gSMhfF?t4ZvJ$dMLK{+WnaamGx6|v|^7Zz2DR&vs;7@Q3Lm2Knp)iCc+4sg+P z8l=N?QiE<#4F`93o^Ne=&dpu_15_42W_OdDg0Xrp*kdh{9$a-hT6-2W@i=(dg7K|V zeDbLPu-vq3pkoKH9>c^6H&>b_hfELmv*<@rm~@Q}m+#&?CgmKEHGl)j)-W^ z!gQoeN34#yy;);PN05(ZeO=cP?Bo=GEziVe~R3$G^z?^+a=Q_7=H_7=ZdTKzh#4P8j49bqFCNuf!5Gcf zsW;Pyu9=cmF-Dm`^LmBgix)bKxY?U7ac>)Zm!_L}?t@*K`aQW+#3q^=QtPG4rG~30 z&if-*w=^qG^v#$>ogzWnm?fIq$&#Arbd>>}8DgYD0-U-z;X}Pqx(Lb`*16J78&VeG zbW48n{{`<05cEP0QFi&Ra#Wn2wRF>p+$@h>_=V!B{B7`WS(;lbYhMz0dtcKmXM)Yd z(3{1*wR}eS1cp~2s|=2FUt`<&Z$zuqSV~10PF43dl}L8<#&qIL$>_zK+7Rj}g510q}O` z#CIMtztC;&^$VE{uJ3CWLrg; zv56L4yMc>$mgD4KzO9bf$**IYFstRBa5|o*v-v+M=sH(2tv}{wiZN?r$i5)_8So#( zO&ZTa@lCU}y^YLwcHd=|OL-xh9ucExnM&>S&N^|$ay|zAnLZ472U;E{(6o~-s-pt> z{%Nl+z~lZ}PVzA5IuTz(O&BqQllOq@*wl9x$To#6H*fFLZaQb{RI1Oo?7JLZx8RQn z>zZVmPQPaj<)nl>kxqhG*r5yyD`XM~1Z00&@{f!E017@G{7CRk_riTr=j;o8ZyTc7 z%8^{gut>6ojT|zNi~%EQ9giGWxLDjg#*@R!*2)pyGNfz@lCs2>DNI-@yk4l zuMiuu*m1{m{_mxHukl~SejwGqZNCBhHJ0jFw4E~DwMB|2Wis40Q}?GoGKI(sz~`LT z(ciSbt$*Q9f!-$8beS=67MXA(X6I=m867zQ@to6=mF<1ZEgq=wpNAeF@E`3h;aOzY zbnonoUk>W?jT~8A&jDLPo?KFa9E*W8Wq}+%<@XFu7m%k0Iw37ypslfL4 zaofuAw5t6}NE^%~`tHc>ULX4&FgixN8Y-lA(^%R=8mQWlLuVGz>UxusNy#<#nB)R^ zrCM@~teA4Hi1055e$9UiJRf=escmB|?dT_M#-nj9^xOOP=5}9O;O( zn@rH=HrG>{~6P0fVVx&C7KoA5W!oR>aLsq|H=PM~L|A z;6H-?BWSjM9Pu1;{hLmaWV8DL@k&O5E z`c?KtRVt$aiDQpbS-eD`?1ayZyg~adT+iWsDLe_R$p*h?sl%zuscI3Kr=6|CDBQWu z6gSGGC!;x!xeEL?S zJ4;pWb%8H zUgK_d$P{!vYc6JJqoL>8r^25JYuZ#ACbw&Eb!TX(ki#@e>l&ci0_Qt;>yEwYnh(Pt z1Nc=$9v0H9tk>q<Op{i9Rx{}Wc@n7uy z7ma*S`qkHnwHr7uB8n@^TVSy)-bQc0O7Wm zEc$kps6_G3Eu0Bk%*NxpZsJZl=QZcw55H#*2lyuX9Z_ztbz7@c5!=mv@wz~x0C{l* z2(5+2;GwC~i5jPkd`aS4i;GQ4 zb1K8-g_&X?0T`3-1sLcE>s?pGU)kBbTcqed9Pm!DbK&T`KYq7x3wv0m4t6rPm>Et^ z4pq8?lh+lC@Y~{#jqbib>z)MFFFx6(Nvhu4#T2NZ+oTGNzFQzx{wy8kxcBC|kJ|6W zTK@opzhnt9de>E-I#)8Ne21r9cn?DO`{56U z_7|5|k?41q8jhu_Tx?@=c_OMs=3_Ytf)pQ9-oH&ei|Auxd3`JSE*up_WmO~r#yRAY 
zYw;uC@58?eX}`4&iSY8)2Nn=%nr*|emCUorBt~M=zVDdtIqm7wiv8Du)nkpqGYp__ z#Qy;LsyC3?nK{JHpWNx5de&Mf zmf%_*6_tq~4EFC_Q?D!Rj<2gltDil5H25c}_$T0AoA75@)a5H}r?#Ip!~sScV0SW% zWc}ls^V?6^bI0dMv+#$BJb8C{;oUr>No;K+hcQG!3dKfSI2`rk-oDMfwTT%}{K0@@ zo_Xp0DT!|cUvU86!Q-t@)+gjOhF#7>$KC;#;xB_WUjpCimsS?q&7;^!EwDwFIObiY zknP}+kUvVu_&@MB!9N6i4Q24-;iOGrrr6u1xD1H$%oJeBEQA(!W1QuVJ6ElJ(5s!m z003@B;h7VR(^SphZxKckz$`JqJ=v zeJ3nxB`P`!nN*T?vipPCMRk99i6)?&a9{qO} zn?1r?U0zX@xKp>%B7dl?`E9&FM!|BK?YaU#t#wiY7pXr}nxGL&?qQNTX1HfvMcT%( zgG+Or`xl8n=jt!}@PGKKKF8ue`TC3g{2%@*x?PF=(fJBCC-+C>Dt@!%Ri)hj*+`15 AbN~PV literal 0 HcmV?d00001 From 8ea8988ba732786dc70270ab8c0381bcf2a33b43 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 17 Apr 2024 21:00:23 -0400 Subject: [PATCH 305/329] update --- openai/finance.png | Bin 0 -> 75011 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 openai/finance.png diff --git a/openai/finance.png b/openai/finance.png new file mode 100644 index 0000000000000000000000000000000000000000..b274c4b9ecde8b7a8f2339edd2bc9085adbc8bda GIT binary patch literal 75011 zcmZs@1yq#X7d@;<_ee^YfYMz8QbQvp-O}A1(p`!m-AH#zNh1hIcY}0y|1YopzF6Nk zYq^%=%yXY-?mhRMefHkhPXQ@|Qg)r$O+|23e%~H6lyk8_mw7==EcR9`mfA?D;$tgXapSqR_Uy{_PU-jWjQ3$i zPXpm#a}vZLF(mHN!pKNl%?UrLu3Fh>qF7)NV~|{^7jM>nvNbd~Z2aQ6NIOX@-C1aE zBn!Y4MkKewCxkvFfBb7?%;WojYO`ILGPzXVuFe0xV1{Bj|J|uO;srT5`S!b7$n*cc z!?V4;O>i$sN%eT4+0Ji7q{ksC;s3rM{P(`(f8WN`B3;n`_c?j1@4riM^$Gvp z{of@qoRMs*%Lm zA2A)Ke!ik)WMpjixIo9isH>>>IW+WewUK`2ewQKSPeDOpo7AQ!AuUbEz)(_LjEanm zjE2Uc{ZxEpbo8jPwsvB2l8c*Ljd-rv!{z<^@kp+P&E;iZ%Jud2V07Zb{Cs?F+l;!p zx~)A|yizR{l^~yG*GA{Pv7_Q*dRQd#Hp2JZUY?#X&z=!lTe-1d9)J4uJSF<`yL5rn zq$FPRFE>`i0Bk2xOJ!?o z>lU)bMDf;il}S$&F`;%tULHJ4g!J^xjLru#2&!mfs&@vh8hiBBAfA+9A0?c%sGB z)YNHj+@5nSe0=&$#zjm<<`=jlpUeJSt~8$0+FXMJcL<_*9L1CRcQGrjl-4+&VO3RZ z>2|`!7@A7)*PU4Jn0XP+AT1B~H!h1_c)~&U>@+k8`!JEoTs8~w44P!`51(OIbEB=_ zX}in^9(!yIrY?G2Xit`EiwFzL1fBjC!N|?Y$-wVUpKo*#x4Zw7Y;%9P8Z910b{K3T-D-rvx4~PKG7u2*K`Fc?Dg#8iOfC-!X6jV zUGfr}<}(%1`CwA-%Zt_rVnx?cSuPHlX+J+d41vohp;7f3^|s`<7_UOOT5gZ~6B83> zYpq0Jp-|{bQi$hVymSJin5Xm3#NM}bZ{ZOfZlwF@G}z}>Rh^t}jgrTL{S<__qD?z_(0sA*7`k6sL{f_5qT6@6sxw zMeZI=%oh~X(Tw?1v(6gqA93Y5yB^}gR8&%gY(KNyygUjdTLA$)Ir!mV6Uu6NT3TWm zJe@#TUWY%Z6h0$4k`f8o;?XO9^-<~Ri`8bMwx8LhP}Fu?AMOr%NPURvVtD;_F1AJy zRD$&c&&HI@%*;XzMF+uxhE4rYed~&uSf4Gw{^Kr4!Xn@LvUpP0N1KC7;9|}plnLf2k!Qb&TW_jH+VYgy|(L1rLKsb(FzJ_(yKssJ8d=SFvT_kz#EU4A0SnH2)u zQ86*`2wmGI>0akYJ)|9i%901`>u?H}iw}3YanqMrpJJ1fElf-hIE36V9{l&&cb|0~ z_4P?%H-nHQ>-#V+FqSETfV~0oDs!RIH@Q@P4_q+;8Mp_rVe~I7&DtiSiLe5y9MQyr z-b8JP=;+-hLc90J39WB&F^GA`nAbqW<5E3SdFt__vAv_iF4m^y3LYAMv62xTjp4oO z;dL?Z^1JPwz?I>_LbEMFKGkWLBQY#$5T3!>!~Ma79&>JPuFu2GA_mEukk24(6p141 zmY0`fCe?tw2eoo#rupnlJM8G>v^Q2T+RxN_JFcqxlA3x&PlU;L;D@Tcy*(K}rtBq= zA2u;@q~7ZFB~}m`pQs*Ya6PxM_x-IKB5C_aX>w(GiWE!;#5a5>U63?E7`eTgtkD(4 zFPJH6XURNosEY>?qa{w!kjhj~kIb7%s~*#Qb89QK?(+JY;&p~fxf>PH?hBaUXlTvT zizf>qSV`FQuxe-A$@eH&T_rGVn$@PmR(1117s55MY7dt^f(vdN#6)dpN8^W?vO1VO zU(i+FzhBN?F0LnN(ydXu*c^UF^VaGpHwcX|FDgUo~N=dXsvNyWs(GV!6lB}oJFrA4vMc|k8X5##z#ooOZ9-LH42 ztCmINp|DTZd8ORP8H!*L1$^JxOd*7Yi@d^g#b>y8w6||p{ow>MsS7AznIgd}3>bp9 z%=d6Fy1ry`m<*=)`a11Qpq)R{X7+;fRO);}#X(1hvhf4)=Hh_##f#873?gpKc%-CYdwE#Q!q* ze0c}s1*@nSeaiDK4zFv+IUMlXW^7OWNQf%w@7j%4u#XjzVV=>2b9i>Ng}DNijSj#B z;4Q~&>BR*BkN556GmJO3H+yyIphlZ>J8df?{#JU&A4qYV=CLoNk1FPRGQc%kXG4mQ zKV|!)r3{iAB=~U4hU^sA1Byo>Iodiq6)K}aW&ZB2Wq!og9{2vi!NYFVkT5&B8|~7? 
z!D4hfiHfR@PE;kSzS7L31UDH1V(3T+sA)dGzOsPH4Mcbwp+|@r7sUAyMOaFi`=~K< zg!~Ot%jL@2&(C&&&k)d;WJ2H!g=PCcMlHbxz&=&teNIG#s?X6(jG&~bsAsnQt~Zfj z3x?3+dg~j=vNvawx=h;Nu}hto*O2m~%{zrXzC*iPLV@g1`Wp^VwWaRwz*9JWS3A0NRQ@ugvq@|Q;jZ6W3chhmfvQg z_xQy>raXBx2~;guYFK^YVHB~DurStvYxdZISZeKcO**ywnE_${dy^^8T5 zq&@pO=?LQH=Elst2?OKLc4T)sS!*?$A-*l+DHMRo`3!FwR3}-kc|+tU3r^>|(^15H z6!<@X{*-44c{(VUl@_?W;vMDx_&tb zLJ1|}cgMyPf4=1Dc?8e|#}1KS5ft*{#8#uDIwU+?WOtdd{h+s3!t@9vA<#zQU}5=3 zpC2C!$HbhPms4&dT|z@<9X1A+iKs}Q6GOO1L&9QQ7TgeCA-;%=j_&cWGZDtZW7G;k zT>S1trwDt28da>@N@Al0(ym{yjqfZDUgt&o28A*MM_USJ)+_GGub{F9?gN;cm7T2+ zvH${zQanoZs_+kXwkauxId+&1EnFwe`cAoi2Lb27tHW-i{`h5NQ1UMT`dMo8*XMNA z;If_zG4Sq-dsR<82Lu1nk7(gZDuJMPli5c+N7%66RVMGok6}@?n%&PB>gKYjH`+Ot z(U|e^F{E7bgMJDxy+p*geNy|h;_b)f7yRz0Z=W3EjD2TAgxA8f3E+MELjMhSNZs2R z88q#}2Z+H_!j4JNLo(b;c~3jjNONK$UdR0AW_~TSd)Rpp>aHC>89~D-a=7GI&wZa8 zROSUTk9^kM%|%B(iN|gQIx+9pLDgmDyRCZkENr`Qj6lX!L}mElK-zXfm)$A5wO&Zd zBIp|eu*ik-Ev>8|a%%J~tO78F0rlFIhCji+ih&KW?bVW($DBh^_1`MYldUJd-<_>v zVvLFoTD!YF8R8C+o_C(Lsz;2&mP8uxYx|a$msOYnZ%r|%TB;QqPPu1GZ5;}#{PPw* zs~L7B4MP)?gP9tXMFL3-k*(%{xntC?X73rDQzubGjX25ch)tGd%K6bi=rca+nTU2WR5?m3%|?j2sl5O zyLKS;GLb0qU!4Eaug3>Q#Jj7I0>{+GX@?e zGfu@!O{sa_1<`Q`s~mdiGsKh!nXK$dg%Em&T^gw%FZc2xM!tOa|Scr+_DbZ}4h-RrY=y#N37^utsC@*tZ z!fIqe16NT~lZ$Tt z#+Q+Z=Pe=&77`K?0QuuPXLSqi7NGx7DOS_}@L{w-iDs&|JCYDIO{AWOZT0osDH+C_ zb&ygM3!;ca&`4;5?+#}pXdQy?p$-Hr$Ty!pe|BqqWN12xT;1IV)A$J)>+RS3Dl02N zJK(%Kb<|C0(;5Eh)2ES7OJTbaE z-9nQxshRx!a}N)XMUcRxj>iZz0UiXoq1hnI@=9zsAXecST8djtD*a;&M*Zkm6x}%6^O<{ubxB)r^6^q1m z=_vz-XIn>9@zo&T!Pu_t^6{~~veNjr7s0z7Km)+T!kTi%05BOH9pT)%cNeX!+@-Cu(uyWzTe;9e+G~G_9$<1_G-%wO~XnM8(9&smBGdlHfR` z-`M0goAJb8?%W)-wk}iYqG`Y&M+Xt6n^skpPVUs)YN4!t#2q^&U@Xh$h;fULj|X9; z*Zg`TEiEF)zTGt=@MT zAEid#rM=;CJ2tFY@HPK6X+KxcOe+8Lw>_C%Sy3vXjI^q$Jmj#+%+rqtCe+ljS~qYa zzqnYLE61!06D{j|ZL*drluwq`+=`gt8;9bHckGp9420xq1PqHH^AU*llXs$pZV&`KfwFabAb=D;brnGsmRDO#G^^9 zOsfB5dW#nkK`Cx9=n4aXvDVvxc$QGOcCt65podcLGY8!9N+bM-!KrdXGeNHT6Ul zYTdbbM7ug*5H;V_y|wiLw4JJDI+1u_Un@)N&`?o% z9(Caz&V!nAez52>oFyhHDM<;9zy}EpWLcm4tBs{4JuUGwbsOdpklGMPT%X{z`RwiP z9^&HRC4k=~07%Ka0&^k_Y48{^(DKauGr$?o0(c5Y52G{33_ zXFzkRthm?`upM8%d=U^3u&rroYU=D1y=}0XtxdN6j>Kd3UH+{CWt+avki=(H0wS=g$EFHQkcYR~Mf$5gpK zfv#loV{Egx<34CO3wyw7}A4#qMvs5+~L^{ZfwJkEDtJbz9RF-tTLh#PaCw3Ce?K#KhC?luO` z`?wC9OG`fzyB^oT%mbRQAX~FzA$qrUa3E;E)+?P7jYTcsi& zG<3?8PZjkR7W{^8)4$7C&frc%>HGNXs<3=m|LI1i0iDOoS)-35XF@dK!T#NuEE^ga z0L&US^`9_CMMcF=@$tsjY~`a^~(OC57yA{urYY4ohFFCZ4g@ zxu0zVaxNt$1z=l{OF(b~D&e7`hh%5GNHq!~C@2U72(T`xav%a5fBpIe!U;5Nw{o(w zPM{c}KSsp$=@!_lEcz$%$HY<2{I?D2)8#eIPiJ!Xb(UFpetZO2nzuf|H(L?&;4y@D3+Zu z_`M6WH5RITl&&{`Zv(775i#*N)D48vBUic*&c@HruUeuJNH;k#Appo-?no~FN3jd! z%(iH#BlOp`5XjSjG$G=#H)M;%1j!Hq9$w(~u!Byuv@?Bgwyt%jv=M;Nr#}h#-Pvz3 zs3%*&^Bgqe!63|^UR(rofIfY6dYXrk5&fX6r%g!KAq)*(#LVoKa%D~q?8@WTrzH!x z3?j7t%4tQg@)Uih!NDZub8@ouf3lrKGPAJS5+Rt=5ox9xKgh(y1TQbIyPKP~!l811 zZewF(a4^FC8~Zgxotvwxwg&(N;w(h*@yLpci+8~aN=Ss%)o~5Z)!R{uVj{Tg%{V>M zPg7IM%F0V!{16kJ}<%R^9|LHew-m@EZAzVPJ%AOY^dAH84@8y}au zdU_VYX6gt+la`Tr1Bf*@Hy*~RI-5n1NmAZe%~S*GN{IGdDleQ+({d1@E#^uH)_&uG zKuu4@%uLkdBjm{9XL9=f0N_)ufy^5~J4h?JUn-wVZB0#NNXX`N6^+i+)D!?~iKrub z%u}eVZt`|&vMew8fIz#6T8E1d~uKO;jA?2D$G$31Jn4+Tu%7zw|7h;(gAq6jS5iV6y` z!NI}V+1Z@An2)u4Zx2md#kaVi00|9kZ=sp*z#EJnpcV~r=Caoo;1v{H?CBBjk5$st z)YQ@{Zgb;&+&jbQDdx6kn`hU^lp5%UU`mA%nGD?_!iX#==;-agESe#Ib>(Uyg;;J! 
z32eNFKYI3+j4TRM(|4}}x3(xa&bT%VjY4y%y3@Otx7i-YY#`FC3mY#A%+7XfRIloc zdv(0z2Rh4yPtWHW>c;Md9(pNJ@cP)Sth$!kR*rgdsY}%S)7ZxJydvCW^E>Oa;NVXAEqOSURheMwWj&#q@AyQUa6hU z5h~SqN&OC2L1Bf-2l5OdEZN#XYvez*+@J!Qzd5VAyU3&~D&9YNcm4i+$K+&K{QFmY zh4(MDw7d4|`X8M4UdyG$iJ*cuXViDPj_eZ7s?*FuV zttgvT9O;BUq?hjm8H9y9KSYNT+F&nw(fk>7Vx%hq6kAfMw$b8Yj^xTkE1sO3!KvwV zQvPV@(`TJcZpR^wjrf9TkiWizkS_b1y)1EzhW!j3CO$v@H)}hs586b0DjMqQ3oXr6 z1oW3{pA9+@^_hDmX`G=wuIK7@D*;3Xh#Zd2#-jgQ1jo(89mfw?;7*528syu3eQSC` zgoIbm@GKE!Duy9hl0dt9+!d z^gPKqm5R#B%u;^`GyAaVXg_OkTUvw#2WS7HeIWS--QUHiW+^OM*YMxkQbew^!MizU z`QH`NDSR|0pAAj=p9Z#18E*dRR?Ga)^^n7$phZ7PR$URxn4e#a&BHT-9SYap(EZ=` zu(ynelyY*iXU}YI-W6J?5|2yhpYGY4VR5Ht;(r16-**hgYP)J`+9y>nFMB8XT*9tS z8k9Ny`X{XXDaAXL-u)CX@bH+nh*-=hmchocg{Nk*!>a!1CLxr4%%i1)BO|T)5H7& z?%JxGfw2$&;+W$Db4arF^VRP0+2v%Lum*?q+fU>oYmOfbd~J#T18Jo(yOt1~mlPD8 zL1@Bdcp|}J<;>GxcGsv@R@Wg22+&VPZJdAOlAMRfzT>%%&y~iW#T)au%cW;mChyOktDAy>fi6C zJ~WXE>-}X&HT*YcjE{}=rvG@myi1RO&Y7gHrK#DPjr4b#I?aC$;HaxtwoY~BdcSB7 zQp_Kq<6SxPR|bG*j--bwB6y;m;*07D-CZ=s*ZGvaIv z{?v|WySwOT*xlXT#!Fo$6AoYJt%FRAk6SIa@N;nBlmUd<3?x4pIe7|jS+$Hm*Gz`J;6Hpi`zuIXtv01SbU*K#(Yx#+qYxwf{} z+uJ)cGcz`(oT6u85gu1nS64PVGB{mdS6Eos*WXV}ObjX4kevv|?5(V1g?;WKm{lQH zq!~>a6Y>^xJOqa98gRoQw&t8A3SO+aHOA81I1;c!KYvPA<(9SS$@_LvW>0T?udGze zt?BcrPPxjEDH%6+fPBIy7nO%Z&iwMktOcN=lWIhRE-=I2KgG!NzG-t*++0@-E(=25 z$^Qz4055^pUnnlF*EfimFRZU`CvAf)6a@w4NNZz#{p#wfU=;|IyK@a}&+oLMm2C5^Vh2KqDiWKE*m=;SD3>*WfHDE}uaO3LhnIO|A-HRbDj-_b0NJb7ZMgin3D-$1aYW*JnTV zt*)vXwbUqzD9g|96^lHZD1=ZD)Xcs1|45!LL{=1Odfota!uA#Q}i5NEdaUY6pCkF%R&&fd%&nx>TZm^pG9~xm}cX_yccz6gDfi?J}qa#~qeMyyq&Ke`cWs#*dg&I`_MVGp??| zZZMqlej>bHbt-NfHBrO`C-_V_Nm?OR;vF6yVaN9ROCsX+9ISuIc06k`ys|R<9uI2D zjJVo2^v_jDC-$vTxs^GBnOn`!ge~n~^>$|DJOj0dk^ggiH-6TC<)^by`}g!!mP4e| z_v@(tRi1H0jn})oYk#`%*o1w2Vn0)Uqr&JNqX);v&cRFYOZ)^fCQeCX^z>xpNCy5yDRR~PynoY^UjR91Wtu2UD08r9teVIO59jvL zkz@Sl&Q!yI_vf|~zOSuoz18s7*KZ$s*V)ZF^Y4dI*gLFawOT2?_f&5CC*&F>c-w z#J~R23JNLdz;E9WF>Lm)_GbI*HqRn>{#_NALZl#1mW-4ao#{x^#_3L39nD3DM-OF#eq zJuD-mKZTTp@%3w9F#S8rII+oec&VXbpV&fDQv&KPj0PvS)@s3=|9ch$e<3-d^V7KO zXLNMQ98tf6{Dl5q$dUOc$lQP94GkNy7NB#}6Nzpg2`yGvdV zXF;8f>36rwY`T7Qa(1Bj#5KNhkC29 zck$r@)SkGx8SJR=*p<21u;>flwOP$}?$6u9EY;g(IhfqE3rD{bh{bUOyXj&cG$U?< znIgy4Nd$HqsaZCufSl!$oMOj^g^X1D*2gW)lO<8j{Z8hQ)R3hZ1 zyfT$s>T^2Pu(O?=Xi@QS41pNJXNE>UExEWR^S;ltWMq_T^YPFfzVrEnbDBeS)cQcT zfs`fpu-{0*@dYA@*kG{agDJGe*@rH>7ntB3zTKwRQ?dR7U{3m{t_w|W?E|to-zG_? zTeMN}$^QI`{o``8cPMr@z_rnOo|N3$`YZhA0xvgrWg~rkGhJ0Dc4H`wVWJc#T5zzQ zDnQFF5}PNwsK_%C70@uSaN2(uxP?-6)qh?-I-NDpIn2LhtxQrbEUcWG#ilbkY>z3& zCWd|hIN^Rr@BspF-_MUNEyqKhqRX4Kl~wCML59lx0fU0PJ$uXRh*`Jr+WqYNFU_W= zfbek7*py%Y4g2fo+E0TA@1x1qJ`#PyiEeTMC+5}~i>~eQm2p)glYz})gCUZm3C+qL z8ZK2@U8;Z>I^l1huR>Yd5lFGCjK`y2S)%`P2HQU-zUQap`6R8USC=2$%B3A>dV}$U z0yYE6b*rFt=3mA{-~0Dmo%{(6faR%VrW6SXWRrk#rJ8ma9|IxG&M|HvU|{ZB4&>@8tEs64;nB57&Tv#t@CE&fRVRPkfQ@aPtX;oj z%?>YtfH3*EhomYxVyulI*8iD_3D1+wPNd}xnl#reZ#A%ev>@Sl{cz=se91YIAfZMJ zQRX}@2fV5(i-Ym1e>6XJ;Lw2V-TV0x&0(c?Zb3o!e^NXSzU+nX??(Wq4vvxzqZ7cT z>(_bO-?avC%q`a4^E;m}xd#NO(W9eN6UTA$Y!O)MANNyQix7h-$VG3t$^*?UkBilc zC5Q-|@oW zpfwO}G;1w^5b3^=>T-K~Tg>U_$b03mj0``}GplJ`ZawFeYD$U=_$+~~E&E|1kRMh3 znwz~qM>9V^4=kLz7Af!*z;g(U#(+Ka0#>#n)iU54a}*M41Kw+HAU=Ofb2ne@js#R2 zXm^kRYey_h)PFdq&NZ3k-_HHKB46UR7Af8o7eEAqD{gLEOmL4dKz^>30c9a4NAwJl zl-H4#mezDEU%~Ybm`?X*YIfQXNOu9mx3y)KLq|e7Iz0vGyAen%M=&jFE|KjyUc{#- zMT<|iwr!noVq@F4n-f7f8Ae`Tx94*!9*-%+98(Sl}z*UZdhUVn#3dFnP&^!!UtZ$;KfQsm4PXU5lmqNG(YG(R|0@JJRmrDH4+#VcEMSl&!3e5!Eb1o zGY*Z>VG;3Z|efTvsi{dJ|qYlZ82lNUY;5S2Or)yY*n9;=>x^V@xa_ZG? 
zFS8}&{>X|^HqqvLWZDP4PTCp@3k%bbQ=(aWeXJ!c5e|GR4uDDq+p3lj4&L8%@m`|` zg1pK*4m$PwQ<^<`k!PbHZScLPZPm*r6TWDRsshN>`y`I2k1CET? zUmdSY_>+qS$)vrx8ht0&6}!;vF_g-uYGS!pK7L+LJI}-%!|+@pQt&k{X5{Ob2&llM z%#H;*m1RbGxjcU3msx9#Z{NZ{e*9BR?^W=Ympdd5yCL9yq1j;R?a^-$`|+I}NF*nk z`5>S$fQLNZ-@k(<-SbmQ${U-7H@7Ze+X3s|MwVE(Jlf^mfsYR`pYicEf-?xCh?@R@ z8-H{J3*H`zn46mer7h)d>svZ7M|b=^-4cNS^pc$Xe8erF-I-=LtHf;UmX97P?&iIn zF|B`r)F!f>ZJf8BtE-?sW}rdYy}S3GP`^gX+Fz(YkQpYVSiPUaW?Wb=Lc;$%-GYHG zAxwlpJo`HiWDuH&5w&x64dkyghQqQQ?j-p@dAMwjY-rRi9Rp5FOJ(k%Jqngib%w$V zWAB!fG(YYXLEQ*aCM_)ut5T#-&ocq3A-r1v`#vBb;8!Xi-Z4$Y><~8(584Y>Lm)kZ zuJICU|0$c0(GoBKL##q~)SEJG#VB4xKDq`4`~mh?TQ5WlViTe_!8W9N<^XC+aXG5efGZ7%*0#WaEC- zGg20sng#xWfxSWfAao^Y5D-#Tum~>nb#zFw6sqbd+EqS8bibyYL8XBStt`yQc)|qo zIy+%6C?CL|Gq%COhl|)iF+vbqV4jn=dgxzWT@7T-RVRIY@)o$3{yyN3%l$tTRIM|3b2P(;?eg-f3_-=qs zglW+mr13@~@P*>CWeODS0-x8>6fE@03uK@J>E?K=sKf^c_W=1(Q18db@^WSZeT&~f za|CuXtGRjz7e5J494_-$;9mWrC(#6I1DO~&y%kep%CIUDBOmSP;^NZM{qxFrQ2xoj zB|{xxO=9ayCj~URQpoy8K}e{_>Jq9_h#D8WIGotJOxhEY7jA6_jaeeW809A zDtj3pVOCZT3D1MDo3seYVG0Zi?1cQeczFugwjVk_1ZpW0TYjMmI$U=1Jq2<&m(02b zCDh_973j&5l0Sh`h;Y*i8hO1Kkm5UbfYM0Te?5QTZ8FKpY1ERY%gV@T4J>8(Tj-<$ zw|U9d*T4(@XCQfUK7>{H(f}y)?1V1N!f`~saYw@a>LlJ44VKS90&r#JunwTbXHS|) zOB+GCyR%A7DFnx>s)-|A4V77njiQBKd|U%=8-@0Q?fiI@*YB^*9~$8Ctf9gF=42l3 z?%%UxcW{K)B4coJoGd1{kG{nE?agqQ7$a>f)Z-*GnkoZYqx;q~HkIMKv(>`k937pp^8yX+@ z>j$~JMEi&HkEY?{&$LcrS(M05PEN*gyUo2NHGg#+bvZbQohV-_`YI0-zIdgmWJG%E zwC!WVZihc{)Q-aR=4Q7_-UsA=04?x+F%S{u{fQn;YwzB@yX7(L#hjvwEa&(Lm&AMK z8w?RbY(O5fB^8#CmIrK^hQ@35*0sPF1qHE+u6t)(E-uqg_qH_z`$Y+z*O!)l+dArX zKn*xLA%K9d9#F@?7g^oFl%DiD%~R3PILw0dZK%1q zY$PJ`ClsvsI6!thGBqr!3-c5&xX9P=UcY|*hM(Wd%L^Ff9*>oj40!qEYf6R1HQ<_! zu;GiUeVwbNVpTLWJluaIZhpOY4&7-kfgx9cf>T>^a!~|2@xb^vKcP`Om|EmBOz}*? z7NV)i;#PSoeE#7P?;CrBSaGul$Has4T<=jffi8}RgHuy-psT2uURSNW@k|s`adU$v z!UN$i)no8<(~)OfTk(RUW2Q^&^0N9D92$HBMp$DbbLp1IBinKE z<5hP+H7&e9ITaPKGrhfQa6MYV)AeKlKA93vpLHwSdR%o+1OsB6mnK9BUqa!NQ6BG; zlY4ZLfEBqF7*LJ*^5x4%ug~vH|BCka2zYZK#}E?8nqKEI0P3J$+W9R4yoM0xdo8V$ zn3$M^gy3Y+MGZpsI&^2xW26d8`Hl1Q_%j>yY3iJpZ}lHmqXl{S_=>!$P~k-Ywvd&T z)!gEM%K4GjglkJ@o}(aP;F%2eCfb(NlvVH)A_J1tTS}@gJ6gaUFo-(M?veN`?dFPU zk9eCqF_D5se5%4(T%x}mm|eueae278^AleJ;u&zU0eC94v66R~x@)g}eHoN~c8URI z#MbsMJw!wrcL&gZqEM*7!|h2u=_AhGjn4pZcUf6=@&Ms2q8XC89&`7g!I=qoMrUVE zZ#-=5>d=GeU!rArEiDYA(9Ja}pWC|~HaaUC3vYE87#MkeZb0~YAIvubwfxH?wiRuA z3HY=Agz(?;hRW?~5fI_hSm6KApIfYzRI2tyg>Q&rCn^;yOj=k_WbE%>-;qnz;Lf+5 zXm6j0NY2mC2VMm`kjemq+OUNy24 zA}~5NZO%1R@mkMPVA<3RZH;DCD?4sc)ZsUrm+fx7%9F#{oB25@AfULEWNAL%pd^z_ z?R~l2$5=WU&eUo-P!Iehdn391K)HA{egHcQ@U((p5Pe3*^~PC-n=hQ@!y2U4B+Vs)U(F+r8!I=+W`E*q%CHP*PR%5Y_9h0E*{A44XgunF6ynGu&5W6F2{6SrL z%&}cn7OYI@5~Yr^)f+g5zP(GA zlaw^WQiw@eXFkzY!}{NV`F{4SpZ`U2pPjD5sB~@`jVmeEW*Zl6E*a>o(ZI>+)RdInV(UPJDxY))&065o0(Bl3{Fec=hk!zaM=#l$5K_o4Wz{mXMGDzC6AZdgt8cMf}Y4boJ7R z-i9e`U_)*tdy<%FaN}$@YlVj`@AyR%G6$O``8yuPj z20AONJ@60~vI7CeOu|4)!_)-|%qHj#s;V4tTpFlzaxa$=8u})CV~;uPbK|uY;$t|G z!mj?aR7L~zfeqLRa@vNar}y{w312N9Pbu<)-Z{5Vfj)IkOFu<%^f+T>dv){_>TGT> zH(f=+quw%AEBodw&Jxk`xxY{f@5_Ysi_7#PoOD@$o`PJ&V($L1kbygGVc~wzidxO{+l{aHK^8U)V5WvKW9BiEJjAcEiKNz>wuy3jFzis&w&4 z?qfEv?(4J6&Q2W}b^1&*c|aL@JrPBGY)2r_2*Jl9mxyN2k)4l2Syj zaObxWFsrK#)a{yE=kNZVY*;+W@5ZQ(#m^#CU1QQhN z4FYdJn{s+72wKP4Rk3`Hv7jU;e=6o(ZbrHQ9~T#=%r)*BpsM(5CP>9|a-^n?%hcQ#j;j5_qQD7vmfQA65QPyvMx-67Ee zpT=2hh@K2nOQ@`c<{@)wVH$mLV#s(Wu~P-oEzsjYw#)PEhE6KyN}Iqk4gN5uCtiD< zZ=3b@ zM{3<25yPLvNVpsPqbp2={53W84*V|(O@IXgw!5G&1N1Bh-0c9EKk9sk!eif;0`v8* zlHFg2HVC1w?`h*bS2WQ;dpg8Jf@lGd(*$D(7&fJW#C7WeD(J;E0+rZ-(nV#5W)kM)ad{Zgrph+`Ak$3La5C_sx4R8Md?_zh8M@WSB1>BlPs)QoO=2R4bYQK9rKSfy7fj?O=mO9G9AA(#F%kyVxn%;)FmS 
zj}j!w{=vcSY@jp80@k4M`*+q+vw>Y%4L<@6eH*6xTa%;3hrTck<+w*jjLM|^v+$4@ z402EH)X+s&N1jckRWjldJsvIL>VU{d+0U|GLftQ~U(?a?XHvJmWMjkX0mpI=errHS zjy6ZTlaKfi2YMtNnZ0o~5#uTt?1KtCjh&$%%*Lv?ecRKYSql&b& zzq@*7v9Wv2D6X!K5lO)2T6ctfV_7#xk&5wd>3Dr*j;F$@$dW^}1m#N?n~C8%J-JXH z#V~d6ONHC~t>v>bK0ft+pL6x2{l*9q;ZncC!q@(7nB#{P*ERP>$!Tc|Ks=+J1Y{Bb zdY}Ss(^t6U=0VYiLELzrCvw}1BC9BM&}zycGih-jPO-eowzjTRCg4#VB!=F@Zz!dP zeWvMO)X}lee6h)W?{S<%Uq~RQ2nzXppPk*GsMmAPfJs8WY_eGW0~)2c<|yK$5fm&A zu&47l{K<&=QXEf7Q_?X}a!%@FiHM4A(?rf`9Vd><2$YRbAkXM9Xb>FaI zn2X85{HDltO2=JNAac=@vgJOhB42^b$2Ezdc9jB7fgqFBO4~mbiG>uC+l7Ovtyi4X z%MlzHD$l!?v7%a7=M>j>8^+;mTg$ewY}>Z&TD5H3HkVz?*7CAlYpq&sZF`^h zY@ZHx@c-$4zPN7AL-@1oT!w%|;z+km_OE5;XC}YFGzBuIO?oXB;5`Pue<1atNWr}t z$Nn2>4Rks^yuDdiSwTXlYt3SRF5DoQkc*L4g_)HN{o$!Gc*pdkKFE{BW!Gv#tPOt} zOxuO2f_GXX?+M5F5~xnw_2iTRO=&1AOzi(*6o%%+qM~xz-ICr~(7R4F4C}7C*h+CO zk?-#ZWE$!zhT??<5XkcnM?8iafk&HqwdGfS*P?WDdkfBV_@tyCM^X7|q&-2DG<`dr z{FMs$uP6a>1uC3#fB%PrNN+m{qEN%rZP(L_dN6Wxx^@S*wvKJP&I-bJ zEu~U<;6m{)C5z77LE$Xc*gRg*#9MVA*cMOIh~BUCv`G_}#>dY(ej4Bf>d&XaUySoH z-BS`PAaUa2;-dVwzNu+pV`F1+@e@EvIN14{#GFJyQT9+6ng-Dj!y0(%&g~cpWRG+A z2(qieo!#|WLmwLZPWz`c3-3gWh&=E)un7n}M7kq>-wz3AU|1%a$NT4&1De^zg1${4 zm;nq8a8)}GQp-2S5kTJqR5X5?k46bUOi|5+euDNBT2(ENwpt|Z2|5`?k4LUr#hg9Z z97^phaN{eebKP_Cw$ahD zwxauxO+Xy$p}Jk`%)Y4T0WUJNw(zTbbf5OAV1C9wx zF?rKLiu?b9wMz#~F{>C{U+bPl5=$Stg2sONKPadErkjuSE8OzZ1jz-e>(O7-c$&lx6p2 z+bn+&uY6iq@&2+VB}Ye$M8t?!k{KKdu4KuXbDhQK?UO;$g_`+kN5Y@*6x*5uS8%qpyuNagU!nlMn_8HTE=6`a?~yL6Y*2yMHP8W+3eTnWzgpY zl&GrEa8PFn4Gm4FRd#4C&=6eZ>7jH7`sp4&kd^exrSE1Vr6PVFBg4T=39HQWeIXFR z$NZk(!deq?v%}EFxe`sO8lUi%m`S;C80@gy>vLv|D zv{`k^!^?2D5CjR$o9YEpP8qz(@qlSSGTz33naYuwHFMn95h&UcCo!aA16%h)x6$mg zLnd=kGlfWds?h`^bC)Oy00|<%b`qo|V54 zVrV?f_o6jW-{%yvz^~*42di3qFWE}TWq?mPj1maYIfKK+ZTp{MFXKC4|`QMvud<*(tQ|r-?xHXu`5Nl`USr40hech^(oF50}tGlHir> z6b#(V2jsb@5g#MppA$i}HVgtk0D8a==sV!;dFU%-l_#^QbVXz~7mR|}Kb`zUXs)A! z?Kp`YFT?X(gIbHw!(AiFtViy1lQR1`=rL7JDFc-~_)mTJ4;2B(*UIdQcV!ttr?@?5 zukH38-pTUm6|6#kyEg}olgOnY-!?7rOj?$C0)t|m9o7aXE}L`r|L^&vr5%0y<_b_i z40Ekx+J6)kPBQk^bGgu{B4G0p?so(j%Dcbj3E1;-Yb^w73OD-;|iZmgV~g zE8l}5_x^q&9@!-CZdba?MVUm_n&4yAMt(;n24^^RQgZU*!otGb9Es>hp~Pl=V7%5^ z2LQM1PeQIDI221u!~QtDUR)HNr8t~=yhP1HesIs2NqqSF7FhcmwfYj%?#D6}zk%u!g+eGn<%CzmdwOzmv)MvLOFOZ$ z;t38s04DKl#G{K`W4urkj;Cy3aO|-)Aye~=xNt$Yfq=B(d0xTE+re497PQeDc-;BT z50jMi&OH*@0~ZI!%F3#n;CuoMIR+;6vLAZq_=uXEwj74`?!N#xyTUXt7svy@e*Xr* zqowyuv1fhl_4W1H*`TWP8@Or@4-Z*vrb*y%NHOQsp+;pxq9>G?B_yU(lXa=lC+aQ) z(SzsVadJPLfg`ZgJWcxEQM`IZ{CL%ly}NMO%&m6>b;l{8hT`F+v8^Gm(}b-$3)!<_$WXKx1bn>lzzQ&0dk5d1C@-aNJy2JG}* zMB3NtMij0cUD=K??AFt3Exw;fWc?08`um)muVr)e*nnWG)_dkuXy)n)op`kgJ5clb%Ik9daueSiEsPt@(}uV3qLoTSk2`2l{FRZ#Hu@Nv9LBbov zESWd*tjyXL+5 zTXFDTe}JQ+N)}4X%|)yq7B&;UMX?$9AR{vYz-kGTmg^1r|2OafXs{B_nf;iDO*Y9$ z=1FOnlEm4j^yGxwL=o%h_{yQ@pgBF=iMIa)i70|w+`97R#j=O7l$}*=?Nc#xoqM&u zH|rO-O;c}gp7w88g(1K^suHp02Xo`ya-||*8c9eBg74v<)oPXLjmSt&GRIogXd_DRg<^bp`7^Bxu5NjgkFVC@!)m}#i}_?GowI@P ze-nCaYz(~dYhwjKB>aEQPWh`YuIn{;wP&$?z6d@N~n-OG#wc5tcPG$;#iL+HZzdgCV4VJpey}yN26b(h{f{%yi6I-eb1|j+{ zT2z!KqyH2X6+t-o4Imcl3sVI_UC_D$5Gp{P<=V8bU27)2+P0pRG1jO)DBye7=U7}O zD;l5&k2jHEsD1v~Qb;H$3JnDX($ZiKBIgCfg|606H=Dp!AZn?d{`%jA2XM##<0{$4 zuj%c2(!oDnID-F5%cfEEEzUIrI~#c(Zt(qmOgHifvn#%F$~8g7kI<~wIK~`BjuYvj zRAUsq51Hdu%YcKQZ3)-c_xR%=NAY(J&W2$a0U#7smX`s;lT}!Mvt9mO!jjpXr7eH~ z_}u7uvjba4MC2nlVO9NeAQ~{K6V>SFp(8pTL_*C(KKl4l%AAnbqb=_#Hdx&%VwG&; z*gN_z?EMgEMSzrz0C<={bYTVI(*ehJtMx4SHvonL^3pvK52MZJGwW|A_TDeCi31rt zK6lK&er8{zYwxy$^A!zJnW}XK3fZS!!Fi>3S5B~QI)6tNQi)c(t zOj;)TyEK0HP2l59nM9UK8m=J-3!gU9Brw&H3euW(9WY~bP>}o&&?g$Z_O5^xaC<8Zaxvf?*NNE)=eRNBFc3XiX_G$!1y~3DZ>>%! 
zYBDhu^75&QnRnZh^z9I7X%%lgM*Pav>1D|U!i)-d=j)lOm|U&?Z3W6@1l)2-@03)s z23*C(ISuIL7}+Y6as=r=Nuov2AH zlK_>pJX+8j3#2TGT{D!!Ws_%){SFVv+hT@>I@S6@rUB}8wtMI6tzD@}3j0*bBw6d`%@Ab0GgA#vf2(T?3 zKB2yKwpKpcMXX0|UG)V82z33kP@FCMdvZu|4elIHHh?qbuur?O(#kYu#kI#IpAE+} z>E6xC192g)ukG@@7z>3=6Um>xX{+7x)zP)Pn(B|?;qwiMZbrhZFSfU%RCIRoa&lr> zWEU11YiZ#uxpnGj(&ZDu0ullk7}$&~;Nq6R%m8eVf{=8cfm&muf($OMoRvzk1ct`*MY~!i zE4hZ0I%WXyXM;A2%tpiL8Eb^%V5MPkaTp+_FW*(b$b~=d1-O0_4alChF{GWou}PnM zeMX+WyU8$IcvY=lwg`+=jP1oR?vrUah{W&i`{wf3ZJGLR-7sI#*g^o3lM%MDs?yzc zfQF58Zsph0JZw`R8qjvMwX`65{|~JJl9t_kfc3i6AZaN$716@f+9`7z!G%pjO)iO6 z4NFs0M4{23O_)$(kxtGk`u+r{h3ulDj{wILkIx2rL7#DVXQL=7M(*Se4({%V1}28W zHbYwnZ&tHH(s5&YZ+Rcyu^TJbxjMbT&}{FU5-12PfND&iBy&~2+WC@trM2%@{`Of7XIMTX3%e@d-^aV+;FHj|g1N5y58L(ML#`(Lb@9YGiERQWEq*z<`ui7_>T3RR4lMCS!as#o z(y*E6AY;8L=k2QbpsXA@4gHSiXiBjNsMsI(BG&BD?*zMDKUe-dz=aS#;EKn`PGV-O zu7H$8%h&v5NZ8Zc3pK#u+w22kpO%(^fkM|oMkXdrl~nbBDqR|oNI})?EZy)%S@7%m z)TKw-B!6o%8oMizP{&L?m379|e7RIW%+?P8kX{@xwJJg!C{Xu{krW5NPj#m-9E|>M8w=QH17Mss==1pDPdN_;Rr}}6oSkI1v#a|rPXq5`tmQ<}*NDF&-yY~eEiCsnJ6EY@*T){XV31;waJxZS1%DIH z)1(n!Fj#6uhkVr3l;>OWMt3)Z(iv3Ij0uw}tR$2m9Qv?y)IB#MdBsHL-T+6I^_JK>-1K-zOqM!WQlf zK;jLVB4^iA!QN~7bZUGRCAL|YV<-It;f66asOg|-nR0szPBr*@D7*BdAPx+I17k!4 z92QcRmQ;!?ty(NEOPTUd(B}T62xzZA@J?!*xSYX<%a9-#ob;7C5ipF8u83*t!j&gOmmt|O~)H4W)h)b zmf#1iB2JGR0^iAZJJ+AEGhjYJ&?m%E+E}=%rZW0@^FZo@Y!U$hLXU}#&bouYgV+SH zM1ktrpx-vJw`ZbAQu^uBr=A}F@X!NGI|S1}adD`)u@AUD3O0yTXG<9G(vC{s{?OQF zM%vC=~SNC@ZdPH=qmw1P>f6EFmG`_Z}#N;V4wJ|FhtLyag(? z!rU7^o$c*b?h$a`3v$Y5vwQY~UCoD;m7G*wZgIj?pzen2Ok(8b)Cvn(S|AZIpZye+ zGi~1z2}cM1i8#N^C|JMgYOa(uHBg399QoDKG!0{k5hzl0wVz}lm5OtHyrf}eR$9Q0 zMxo#+(S$;pn>wJRoT5f7ANzGtT?8jt`UHc<6u+W>(@gVw3?$}O?nv( zfqXPA_*dvlPeOP;Y!Pt9P^Sx_vr<`x05XjQ+1c=qw9RdDpjs3gD-VJjAOt8O5!QG{ zE_4>li|`555peKfoIczd#LHos`CdR563}YtPVNSBdCy^G-%D9rTfL;nNkj-2l2_=5 zAz+L~AmT7)WL7%SMnp7sZTP`8v&**=b8?`8`?z7pP~ROH&n>Vd0v^iL{i28!ppL0C4hDG=9QM^_T+& z4+ef-xPIF+^5U*6y(oE(M$56LQv3_lQu9~H#Kzp-f&PA4<-D=FMZ0gh=FH^_Z1OSi zZ;aURBh5Db3Xq5k^uPdDG*lKd+%1G$Mr1lDvUcM9YWIti>APJZ^E1teQ> zjTUIJC@Fds2Nm*K&MIq`u=GRQ)FJrZEQwgy@?RmChd9&Ad{AWRjJlGxyHjBr3*UXc z*E3X`{2ICG zX=-$rZt-K2BmM=wWo(p80@Y%{?0gQ%<42B%AIDzCMA?vPZ?H(I?Fi5m2LJ<%*emXQ z|45CUsu3>k^e-M?0B;`-f-d62Q`zybIRq6Bw!kp`X8(^oTQl-;qNAy%gSPs{Miyq~ z0+FB(Qc^HK{(V5a_!wmglIMlM2?g?)LknhaPY(!@{7;Vts!zFW@;YC3O?|n`)4}uc zL~5wnFjSnV4J`d;U}e?vV~f+i3dL^j_mawko`r%^wX8dB5d(0s5NVqa7DdYCF!}fd zW^M8znZ?u%M`(wGe?Myn12p3ikp@iEFqW68QccRBd_z0+LEhcg;(<+xx1ZnC=oe}( z^J;9jZQp(OmX=2m$}lY`atUc!NWeA}?s@sX#?pnrgL!Cn52N#pK(EOWl9?O*s%XYx zHekNy4U3HGXe$I%n*UWNeUzvw+VQ$}P{jw#OBP<~pf#1LRaUXcJb`N ziM$3}Sy^{9X_@@+>Vb=hp9ikR_CiuDST2UYu)10_Ey-r@) zok%NR)1E)n@$*|XoTgX+vl7N{veEjCU)fnQZxWi>VFzYV8RrY~qxL0Z4s%HIH<2jx ztUATopVQB(qnR=C;@S$h9+5Fp#VlLyfnB73X1-<@RRWn zhPh9t5d8STu`2mDLNv25(=mrpkh5sBnTK-Q0g0g&2m0zJOrTQwtDMi5!@&sMv~`_R zQcX{XL<@UFJchdLd>-WSWXlKzk=NsSz^k5Amv~MEM;WtZz|bQT3G4>zQ&!;VO+hCN zEqBwhhRc^(iynxUN^^kmz{SM{(EkyruRxZ=G2wH}CF;C}^mU}B3hyS~+n&w#9Z7us zOS>c~tbeS(qbM}YM=*77~m&zNcEf><)K`zkNU)p3PFfgCRYX@OXiYu{}rNCuen zdf9`oa}N6)5;u#Y;w(t_e%;(8$oPX-9}P=;+%%-X{i|=-WqEn(8LrYu)_B|vk@;~` zZ}}zk`5r2#z6QdV*TakXygTl}<9Jyky| z|JImbZj5*xL>>qK%?}C<$lWrax#km0;!p%5dt(6eTcNS$$~x0bbS7pQQOB7 zI3`{qA|khR&^?h*!1G;KK6)-uY~$@yZir{Hp=OAM65R_f4lxji42qEs`z#i^S&i|J zA0k?O%6xDWvQdOQ2ks2#200MLc64i0sttFXb6)?Q6F&YgjAEg`{NoHdT4q&>OpJ^Q zim3Iu+ukF_<`5Lt%*NO@! 
z8n7gNPG>Sfrz7J93?@&f@ zP}X~-`z8M+a({8oD&kOINooka1C`Z1E({7qB}mFKMNL`30x~t|YS_QqUCAUX)hekv zTYHZjqZ@3vt{p#k>zx9Jj@C}&+x4Pvrd5`$r;-ihrKWI+^LT&E6I2Xgcs zmW(4$1Yt`@B{`$0=<7|}i)bebyM9CUso*y^P%8B@ZzhAHmX^gH2@$EiNX};uB_}8Q zoIKmptKrT>U%3&486}X0(#;Rxas2!TsIxp#oMx0oRJhf!cYrAiiN~>GeImDtxeYd=Mf+TDcHw1(Fg`b6`+mUMKBhcDLTFZ`rkO@Aj zgFhAoxi~pP+<~bQ1X4Lg>4!_Xydyv-54sH*C6^ozaEDokYH?dzaSM8ljsy?T2>@L|$m`91g#^ z?rmN;SX$1BK+eJ?c>0`iGYB&?!P15DBC?kAN~7hS=DT{d*&M#@%(8iMaKONFatX`_ z?Hkf0RHU1qM*{2sV+^(HK2Dm{6kST6Cj_~l#Yg2-9bDU#D z=7&I2KG6epuiwc-J}re$c^|&YIHX{bhZ)1Kw>8Cf$c~0NrIH%zGq_q#?F-c7#2{L1 zlkt-xzdUbmufIoWLy3YY#EV%O4PO}g7RF(DQ7M&_RN)UQ(LH_n#KeNW7RB6+5d%(^ zmdEGk@C(P6m(vC6>zr8ESc8Lu-3spR92n+(8$ya2!;3so>T4UCkaA)}&FmUq{=G)y z1Ox}?d36-#ag=U0QVtJam#lWz)gkfOhr|h=@v*aG)ZBqBAaBtilORS8LJrJ<_aGC# z+3W2HJT#y(BhgR*j36jif=WvczYsn_nEfKX+vc=;1i>S4n2dY980hH{+5ldQ+jfCu zkSE0)_NpT}B)!=<2vq<2?+`*zi2AFmAJA<7!IJ5&(stMqqmU{U-axW`GFkIe@)sb4 z{r8qWlJ0I?C3+G^ci2Yr@HjTY#LWo5z1ql-h2K|G$#-?!IP zF&rQk`tqr$p5w6`nfOOGeD>X+|8pNsg67Z$oBLctR5k}+8AHSV8?+L^vS}82po0R4mo`~|-T!ufIQskX5<0_T%ks+D_snyTn(FW8W2;rHbFUyx7C&ws3K%-Y ziPb?xL9=yoODm}eqKcSo3n#M25tD&-*i3lX)coJ2(e1Fs*0$+9Pz3mN(O#ht&CTRe zQ%AYf%OJL;K|xBdL#W~_h`B{nY?@Bo2%<<*#v?XfU99yv)Ymc z2D;1UyWXlaEQ=`sKQSCP8zgyGS@n)_q*)$Q-Ot~D;C=-8E!Z^!Gr-l{9FqdOYQTE6 zTt?o2`+Zddb6giP>gmxj1D2_jl%BS+xNjItyFgiBp3R}=!^c34oF^WWyaqM=zK zPE9T8?Gt2ANP@Qw)KIG5d!vCGaQ^y$VWD)mw(xoZK}Lp!*!$zaXh6Y)k@i+^lw0L0 zcoUD2o*eym3dIgQSv#b>k!!3~!)VygcASw4=oeLSgT(dctzFyTkuVd?7Fis*4 zsFJHRQ@JHKsokk;6w!w)fX+rC{>;!SbPSu(l<*(!mKYZ!J2gsU4c%+A_@K<(+MVc8 zGepYQ9f6r)6vR^M1kt}K_|Vvq7M8K+cYE^#_fU|W9p)oLo@S?Ub8;ngn7$_Jr7e7L zV+342M>&SBG#eCZY%0X~lfvI&ytT9CWHVC4HWgzMqo0_2e3W&4!)oDotP~f9#ZAz2 z1v+T3B>H?(T_%vEI}i)riK+UVnMC*?Mkwy65x7`#ik({X(uqmm$NJoRI5Kg4dwq$;=~aIUfES zVYU2pr~KDNd74H*pd#6$HNnOv9TeZXz6b`J+|^~t$?`WSk1u=K=7vlSOB`hvG-2!a z5*G4{%R?OSZWcM`QQeq2S(V5;snU!?1VXHoS`~FQr|>bITj{&pstA2si)UqF|3Z5I zWm;bMRCm0*s)`n@BLiq@IOC?hl9Cc|3IUP3j(xDyZ6TkJtv0+4ryhgB#NRoVT5408 zo;7Lt^t}$0iSVCSI^1KO^az*T%1kB3qEp3s^EixsPc!8n=h*}A3V z41v*GQYhFH4nGY$ollvQ*c|-?;YnH5T+BzkCq^?e28gFNB1kGh*9d z-J_8c9S#i%%gffVW%qk(J3rvNG*6GC1ELUQAOv&SJM)Hs=XP`M_cB1q1Fkt(*7x4~ z5&$7VWCh2enWsR={{g^&Z|#(w=;Ipr3Z<}~n5Cxh23RG$2z9XW0pKq67A9C6qu4A% z_mjtMozW*7lC<=E;x$REoLnAXeC=L%x6LgBEP`&rW_t?z5De;WL*>DQIz|P1D@6JR zbSxW~3yc(4J~;EGrUe?T4vExMobDb!ce?$FNaT#Tg(C$2-X|RcEDf7jj9<@&z|FVm zgf{k?L@WKg+aAl=OMn1Dr_McL=?hK8n0r2q*V=mGgi2Df8^cZ=5%AMkgBJFRf`;Y+ zY0cRHtEHvAUw@?GjzL&>KwakjfP$uGIzXCNjckB08o08j2e~uvHx@t}Ni5yEC2}(? 
zYl<;Nv4}AC0|w<-k$}6z!j^{SHeMiAL`kbB37w0g$NHbxN^>;A5#eV7eZFxC#n;!9 zUh5CR>X?N(EPc_NXvpOa0Ajw$9na*DNiIijy!u`fg5x_5G#mz$->*xXmB=o>L)_=R zO}#CHqm^8)3f+h(q`jTit@O`t5^5S!aq*hiMjYl)Sw8PGdKX2{@A}gdB84FD{rwrTF(Gsy+#-;o6VcI12Bt!MqDpGRQyE`>WBULF z-|GvnfYo>G!NFQqL^&o7jux<=Qg0qm`l_dCK-nBzeCw+nS}nwZ7R=zfCcf1FvsIa2 zX~D|I4v>UJnNaUo0(vD@GYY$)#x33dzY8-x`OoWY)8&90R`+nNA>#Ib23ghr+o3^ApE+7GSRplXbC}Jk3lUmxfT{LW(}}BK7vdf*eDqZ^dwOH12s%5H#21|O)A|5Fqs+lq}k}y-IJ@MOGF$L zEmgCbWc=WF=YU_r;-{taX{B~qo~Y8-=I?G2A}L!ql8nZevb45qo16yjZ|1|OKkh0Ic5w@mf5 zC78jwFpfVjdiMRh61DA|f0@2Ubzu8opYWNH9GflS<{Fa)=p!tNx`#D{7k^YRBv``m zOwHMRW9jVNIvqCZIuIHXW}c8l2VU-~H1wNzHy3;iUPE{~&1-DU5dC#qCa6Y$n3V86 zo?0G276U6r#W?~Py<|y)eS({TF^0xJ1u-eJ;f_Rc_4U0#p`_{ zR-k@!V?MX__x)>&eQ40pmzVrfjULK%prY(t?j{|0daXDD z@qv*W9LXajkUDe#&hkP_A#;ZVVQ_RVdc!#f7194NwMdK0yV^8DujDY!mP;WcnM~4RpF=A z7)&ZKwgHpf+nGEAk(CQH!k*kCUSB6T|I7dhLCt$O3>GZ78H%jeX&h0FLMJmRs~n<( zL+yoRe{8qcic(=)c9_ zi1jUN?S1Z0p+H#3AzcJdUpks1?NIwnh<-OKEiL5ECOapH>vO|uE}!yg=jd>K-B@(d zieE8M`Yh~Z6Grm-@EH3Y@%h~HiN9+*v+}v31AZufu!>;~|0Min9Bf~vVy=iLpo$6R z#eB((fqQD@Y!#Fsgl1J|42nLDmd6(!>dMFD#d${48!q(q(5zn08YYJLsgu|Z5sc58 zasZD`$d`n}-mkt>BzCp*GvkbCHpDuuvJw;hP#}sFVFPwkqr>B{2$u}c_h~O+%x^ei zcx^ZWGTS?98KgZ>ak_nO4Z!DlYs&yEATXwqprWE8BO@~tTtns|B;;W20m(QZTmzKQ zaWkN_A54Mevj{(Pr)82keCC)p*@Cee2ru>sS_1A4ClWC0&K0TE`NShidOT}y{eplWs6ne;pf+?Y^i3%BY159O12c>>o6GTe@MK;OW<^X!G?b{9?D4iz*i_ttFh8a62RZhIFK+QO;+| zgh?&VQ6DCv&SEX_>#x3>Xw%01IZ0M~j|YK0@p{+S0}bDu?^5KRcvD8ucPda?s@$TPbdyE6!gzLezs9E1xmyBx^5^rj4|sRDxm~P%GCli# zoQRB<(_m!3zL}$`t{!-^Sxci>WZFzlC>oJ6@&Q6OJIt2`E4kOK1L1&J$4MH;LVx0* zv0Mf-dC*JVB+nFq(rOuVhSl%;d9miAg~&ECA7UhFgY#4ho+waOY~epU@{AEzTPyv0 zux`a{^=$QmBy^=BpYq&h(2Nle>8NG-H@q|ip6<_oWNfhsWKi$xt_Fv(A+K6I#uOxn z1aJ9pCmPw#V0{`eLKkurw?V5{go{DN78R;V$ zLQH(Z7{lQ2<77hL{dWjaZ2oaHSoysrABGL6W37Kc5W^}G@g{(k1^&E<+;Vb#H8ty0 zhY;}d^YRAJm|>9&?~@=a8X6f6z~%xx(eL-<5Op<~N8&2QIzBX#AKN&wL-d(+)X?8I zSsS~=z>8oIKaOEPA{fkZ`6==7u?iKqh%vk=Vw-cj)hkqqBp@C%Jj+Shz50pi?5k6@ zI4P!-=U7)J;TRw=VS6U(53TCz#Nn;SK81KHh?m_`-Kg1VDs*bkF_;)!CAbyz<6?AH zP(pq!5R|<@a@f0C<7RO9{Z$Mbq$97)u6u+qQs@aHEykrf)AS43syrn7La+wSAS-3f zFx>vVIJ)jrMjHq*{@)d6`jLHk3&th(65oL64JQZ*S4C?ED;R(|);8dI||}rjmQZ5mIST zku@t1VG|2AlT_0VUA*@lf$^Ymti?xP_{B;sqatfC%YL2<3OJ^L{%7lKL1@?>k~VnQcpWLjM2*u}u7KYGL3q z^#ccDJMJN^wrt2(+K!D+=JJ!hhnK<`>_KYmIDHd{sgrsJ%=p@44$A!Kt~FUTjW7&o z%BpKi*XOdBw0U_l9<2~kZ#wd12&gnuupdFkwZE&{r_Vqx#)K@31(&9n89CvY-@i1s zo1YUeiBvhWQ>C!%@uXDpv~hF1YGh;tfVDxBbuT2S!qmaZ35Gg{-7be%J+R9#08_yt$~&Iv$9Hx+@$MfG8If8azpbFPZvNsI}VIG_7JV~z_nufQ|D zAxe4Dg#Yg3a;P}?9k&#&IH}6wWF;* z>Sa{L$rqj|mU`Jr{^COy3hMT_G)WdDN|y(X^gX*iGT!rP_9|iIt&%dV-9j-PKf+m2 zmW=l+WEunwd?G^W$0CM{?bNlMmgwAab1@@#&Yh|+ou z{^!iPJS^1q`Z_O->aHTT7K3nfri5&ji0Y;bj=*N_|2`rOnrle;!lZ_0ryUcdA%^eQ zwIF?HtoR%!C#K%iVI&|HxGenf>NtNQfU4< znMl>ecJW}XQrebVU|}f_tu=o^#H%#8+bjX`PHPg6Od=8u$w87v5ff1nYDbBqf`EvY z)apu0<1(3y4t8`=m5?$Z06V~S{sim}0u+DFb=Yq0|Gz9*J()s+qbqUoTVKC2(D3Na zC_(6uY-AMO#S4Hi=YSTSkicgYI8MVWu6$T{pFnpf}xD_4A%j8G4a9IqeAdJ&<=dq0!w^QymOcL$jfz9Hb+jo}R!F=NE9|93$7b z`2$}7RNoscFq~kl{B;>QH$M-W12YCXN{VtSyj|@Dmlt-Rqzu7tXaBX{x++H7(h||j zE!U6%&8YYo50&`YcYpA>u#hP~Z^qb24o!hNH39+AE#!;W7!s_G_Z5ymYmtA{c3(C! 
zvg7yLgJG}O;*T1xQ84R-A_WrYvI!*!IW&s&L_~eYsoxJjo838ZNlqryJxAxu$VY9zQ!4ONP`Mu7g!xT;)voOWs4K0=r%gy ze8)i>Fwhme5d zYGNb&EAD279AX^B1{yb^X0^xHKVnewP~ZhcYZz@Ya;3*3>j;a(<_gWw=$3 zPiQ^s%|PMW#z=Kl^Da zIQY>Nk*AE#ddX=Rk(EC8gvWE!;LJ8mA?XjjJM_Q^c+k>clM6pEGq{HsCZY#?BCxE_ zpOY;Da^75#jl6)NU~ zit0ns<^A){I5o8g7k3XsK7~g`Wx&bls;;ip!;{UZxBWiXR!723jyhGZtV(guTMeVf ztJptEV{MCNv$b_9qjEa^=!E*yM77=kItdG797*k55cJ3MGnoUdb92IiyB1`+oUtNlSSjT)F=1k@sZ%u2bB^{(`I=koOp6awJ?8F$33 z|6}P&x%oyB@xPrE5sJsWgiwTqZSSPdUqX2efj{E1Tn1?|hq{89958`n+9P3(uIBQc zTrDm4=W)k|9G_mFPU0e@WT#NF!meoIFW~MJg`i)@tQezS76S20K^@;&9#rUo07uWn1eUBM+ZOJ3Xn=k* zM7nPNdgIjSL4u+79FL2TJ7q`B)aFD~XL!dGYB>C2v7jzP+ZYASxF#GV*tnLNpME^6 zRF{~jqpZ9R2`QS9`R8%Nv@#M<5YAe?nJV{9Hm0WN>V*ek z=J4r4*DK{`e*MPQ9P|TcTB`NHzd{V3;)>8Zi9-<-74y>--JmfJ9hd};J43Jhq!7%I z+TquG+)Dd@9SD9?`{AL@9g7V4S;FiB8&jeuI?#e}5*`hM&-OBL3vozYF)CE*wwb-7 zd=usbRSUm?Rcv%q57PyD`RAkkQzFMI*1>W8CtiGmkw4J+66gN6Jgr%m?U8UUPxE{N zkZ$6^F&H}sujf2IC{TS6h7gPHo>XY%b{bG5Few`IL+pW2*_IqD%(YOe@OP7pbO%gl z@VcmtkmHZpYF#QSOYC&8nGOp!zLsWcl1_8N1~9L| z7FVJQjS{RUJE^H@M!!(D<}#gwPLRr9yRvNeVd=4HcTHYbTj(c4K^$&LX7rpf-pCRqiK|#SM zyhLN0xz%6dMWshnVg#>&wNu@ctU5hfZjrkOOU$>0~W2~V{F5>>!p!cJg*PrEZ}GN2O#9a zF)0xRloJumeSLh`OuD|-);dk5v4j4na%Pw4`F8I=kS?1Mf2@`<3g#GsAK^QVjB?D; z){c+>3EusHnfrN5(KmXeqochZqvWhTFCC?&BO4oCoC`x;UDf1d1QL&ITmJ#qW>VAB z^|nW9Qu6Zh($bchbQ$&k8{FN)zaKhFlw?T|o05lO+#IIk4b{B$Yqan+wBYRPIs1|I zOCpYpTI0gGcTTSNL0L`X;||>pqsdZqG_5;yA?(dP`S!-IVZsVK6jqlS@*fC@Nv28- zvNaS;Q~^Wxb43SCNF)#$J;$j0G*MAm4AZl5czSpgd680z=!vCNFBJV=CdBp_G~xKQ zEM-+BO=;=LqJ|WZFwYXG1H0xFc*_1ct1P5sW>(ZKe7@(g z6W83ja$lYaOSYCg;~=*9>oft`x01_m?!fz_u!9ziFm&lgI*{jMsY?D)V=d(w| z_Ef8q7~0<6RCCTjnjJ3asH~z^f`un(WTJ7gnLS%zI6BrB7(%`*kL>!a;R3!f^`$}Xx zHTFG28qR>LWd34=;{Ms&umkXK($YYGb=b0_A%h0Z%&ua;ERUnPjS_Gy_I3jmR2Fj5 zYtp!408{c6Vk19Y5>^4x)JV`?CK0UVJ^ic?m0KKnku5%?r|DhGCGvokp|pGbbEzuz z>_}n0{sdGL<1WRp97!so$$Z9nZD`Y9!p5+5u_+;+&r6IKYSzxJi zjuGZN_h0qZTv*~3^dIWI=!-k7dRI>%ftmrM(FHLDBed!%!D@ttBL zr-De3;$A;;xC=an;M*~aS@wDS9!vDz_V0yG=C|HXq_VpCvO8mvzDjyxuQYL+ofe!- z)yNFe($NEw)0WxU|Aflm)CRMAu+6sta7{b}hu(lp58Sn8r2wXj*D}Rl4Q6@O)y>Q9 zGVk~YCI-O6+y=V%oDJd1g4`90g_xERR0{QoRv^FE^$TOCu8W*kSK6&n&tUbPN?HsF z@5s_{Nkap75Bqf^r1X7C&;1eUxGS5Mw^Rt(3Rx`DhRr(#Klfpj_BMpst8vB(l_Z9s z9Ek6{lL8tlEnTxMcIGIDae_M&RIg>6Y0BpQw9MnJ;x@g?*MCn6`v?#Nw8AV`2Bd$6x7j4HyiRS%zb!Cc@aFWo^ z1+ZZjEDy~R{GZ>z%a&WKa|0|5J4-U$8C;s<6c-DDfDWWidyz3gt{Yrwgwy5z}W=>S4cyc4uWONn@Y5 z9H~0eY>Y?NVx&Y0N^5(IME522HHKp{y2SjI#D~2kkO>5<$D+7|3fNKWt#kPmo@ZTm zGzRj%LJe$|f%d_M16~jst@E^X-JmBZhLaq-)?#D6T)mrIA51!jmL#H=)?KXA0F`3k zTu~9DB0QLj08Br`3bLFtCA8MtJ8K`&?TfqZ%wPU*mO86H1}zA+QV7lm*35O1Kt>7U zge~YcsQ;&;1q?%6TaIVQ-afLWokB>IJtSH1E;=@{iZ_{;GsNRyW;ktTvWO+7+Mu1< zCpHsRpQ`lTHRDR{_`gYGVz#hq6)m8DPi5Qhmo5GIeD`NXuJ@Utxs(CJs5QoIekRv} zjm7Zqk{&o%0shg$(-W+G0kcHt`X93)05sn%2&c#ra9jhsgTU1WK)`G|DE34GYpssi zApOk+H``&*!9A-_&44#hSTPP77!>+g8JL{(=g_A3{lUTeTY>36)*#0c8p1n<;{o9V z+#N!4@?3zQ9B0gfx>6n=9c5)_BOxKd+3NmrS!U7lmI*O?00X?Z%05ZAMQK8KHs2F! 
zl#hd^Q0f-3xGQSNYOzucI*_IS+Jz_<^QdV&bRAq-fbCH!OTPft^O4T1LSO_|I#d=>$p& zPhr1~v;-YtHpva~9URBtVZENtI6sI&4p!UUtxWxPU~>0;tJ6Z@S;W9j#X*xm$X++m zhm*X4nyx-1(F8R$dI5T_qDhc+@O<;eT21Xsb()Ej()b%uy=?6xZYTpo%bPcezPG?4 zKf1N`B@;2QWCMTx5fb_N{xBVQP+kcO9~~S(Jb?<}8gzLzLA-l znTa3r;$V9ZR4#38n0?I_ZU)SJa#6AL2EQg)?KNZt0R|MU z7W-Q`6h2;FxFkeHgl}nF39_3A*cO&kc^W_P@Mv{cVMHi>(elx!{`UT(=Ynz3)h{23 z3J4yJb0o*+d4u*fdAC3(NRdNUY(Ontnm@< z;zd22EX2zAYXVNWMvkWYRf)=P>XAyu6;p)I9WH?cZxpoEsNYifs)i`1)zrR`yF*Cn z_3st+foP-XAiq91q`$muZh-feM+C5$=Wt0p0(;wtxQqeTN+g7Dm6Jyg>_m z|I_Ltf4;em4ahSARXhQwNf#ijZfqDs7-Wdh88f$gzkc<(9tf4F)=#v4ZyY-caUVns zbgC|9FrWN;S|mONQ6;h^+BPWzumGwqz(=VnECeawo`u)xiDgxdCy4<80S^xkqCMkj zvN+uD=Ubd9QNWH^iLIGK;4X*0Bx#H%0>pAyPvTj&Rq_oHlF8r-?5uQc?ZMZBb+qQ6 zvbCyglvuEJOP_moOofQLdVZ0xwght3OU^jb=(;D?9hJ+{({F@RYTGg}FtNo{SI$Vx zoPX~Zd$9e*J)8aaX8{c!(ae_^==GhZnJ|SRWD`*vX}*wFz)n?JGC)8~d2ZJCj_cXF zl?*Wq57)+3;(a`qU~|N<6qQx4?LcORFqfLOiBd!k zLsd0{J=oF@^0Km*KYqY{`0$6CT3S}!)AJ9iO8Uo_+x6`I(PL|?jLd9IQh_OeJzHM( zI-1J-bu_K>SQQ9|zd9t{ZVLO; z7YcfQj2cN8xWr18jSOo&Z(Z3}*Vh3$`U3wI0R3dPGcy$vi{Ck?VcD?7JC>*ZD{tFFCJLdfg5bn>L}& zy0vt&Lar9>siR+%u;>sF_Jq(4?8d!t1+ZuFZ&OMS9i7xi*k6n!iQ|$LNFaIU_zk+7 z8ODA6vzo#b5ss@o6NtFjlJ9ILXg?aPyUNt}A0OciVw>RL3=S{`jtpx-?ewaqU;mH1 zGAx>*55s|uu48XMUsJuuAB;~g>vMN9xA6TA7*iS=Y|_%Yt&FJn_&h*_<5&N|(i~W~ z0rVU}=dGV-XAS_829GRs;!Kp<4KO$oV5?mhiLs&MVLL3$KFxHv7_B+ops8GaSkF|8 zGGs#8af!8IvX*h#rLoitvJH}tZCf856cxunnu`A!W1#_nY{2dpU}OM#p@m56oJhQH=208kA##O6I*0RzOmw%T{W+~N>Qv-HM2RAUV<$CNajMlB<}w=*e8($Tun zUPB~azl0&Ea675otgIhG)m2&J;B^OAxtg*Cw7wSTzlyKcAW2T2?W}JW?(RYS4V-nA zuUL$mf(ple1n9m=gtO`k;0T8TV+O$2n`mm9Svy3J3UKqIGqLhk)^cF!hBSgou)4W< zZmPPVxR_32dt+eWHL&08hLHx?@bQ{q0{ZgA#Kek;?>P`?`2lzg2rho29VF`Ty=Z@T zwp=6G?2~8x{z=WPu_8E_-n0vr6&?*sm^6*`cHm@gV3UW4n9x8m5|FZ7jzJ=akK90C z|EtwF&c#Pf+Rv%0$uiCK+MPKaPq-U?ydSJySMde}vl%NI`#gG0&}vOZLG)m?@xCxp zlYM!)^beZ)Jd1bPolZ0O+V`Uusl_z=qOOb?1vm7)_|(Qs{7)u1x#AC6^6(McScRTef^>aS@Gj!Z4N!E^McOeFTZ{DgYfJZNe{I-bALX`}H zo{6#F1>v}o(klX}N>ErpI%i?vSYDo2n8mkh zoALgs4S$S_u>ErkqVfo*r=el7J~685s#E%GqMRt=-+-xq*l0VgSQ5 z$IXotPgw`3U*{{PjJ$|K6u-M2RamdHSPg|H}REW{NsS&h$j??(yJ56O#+gLh{e2l34;Sv!0S00L(KbO0?(J0HoNH^*o`BN zjYS~n8+2v`(YorI$;zsQy}b=lQTVI}usR1^z^?W2@$m)yQXn-05-oSn){_HbqSdi6 zBe9ekdBQ@oH$v`V@JPNRAAT9y`IJku{aPCyre|ZTE+~*%pfXKP<;wzaV*N}Q93(g@ z8_zDL)c*RLuJu5u7TK2UYMbS!wMyi@ox;Q-+s-wOT2vF=tFsr zU3tD-L-)_k?Q6P8p)J=4WoG8PWAzW1X9rf5^vqQs)@h+nBvt7%j`o5EhF*)md=60} zInM8o8r`UG{!X3FBxt_DhmjpF>r1@9=PUd64Sww|Xo|eJk!S4&t(Al{`J6afvpTsg zzOcxSZ0+E0c+c9bBqk2NTrJw}s~1EWfN~)qAW%tT`*8Ya#k=<=%G=NHt#wIwS()Tc z9@810&TFGiYVK=&3yT1*Sg7Re_VzaT@=ONs-cGTxnQ$_HK%R{~$IBP&%6AGRhArRp z@9n~uHiCYS!?y*xC(uW7oK1*I&&i2VA{Ka`k*Z1snih>9S{&sGIlgaBL z?LXe`ev@W^g(>kQ38iII&GOyEsgf>BS0+o(% zuV>6TI8eGMiWf3CH*or8qpXBz^^-auYfL{S4)ysld0G|Px!n+aq?mgjTD@a9q0?l= zEVt+Q3+yOlz;hng3vl)7BkE&*kIcBR933L&kknv%wIy_LrgBz>OUiq2r2ToR-rP}< zbyw(B%vOCMjZ*!xK#W8`C;vBxxdoeYbO(25gJ-l<&eh3_!$I>QfnLo zdjIW4MKwP>h_ijshcqmr8_m%M$x@HygN?e&Ud*X6ABR;Q%Hipt*jMJTCHkM9`Ik&= zX+uLoME`EM2MqKDf$^mc8}v;m4h{|&u?f2$slF26=N;_-!~Ti0_uosW_BP>vO~=@X zhOpx=MYsJ}Xw1whLxx&&_%iMtLtl8{^<$XODt9&LUum zI4&3At~hk(GaSg)>I-dt6A zNEq8vo)4OBgHZid|H~B2)2M=`=p);b&%55?3J7a(uoCy+^M=2K+TRMDJv2RgmQdoW z%nbd&spmC=kat7%4lE9JkpZ@Wu-pE5tuwqc5tQ47m{-X*;4?a<6>ZIHgT@uDb-A@9 zFCU74Q1e1SWDU@k!2<(+55JYltIpPY{V79leAmX;*L_cB3%tS%ZvQpSl=}<>V`!*HLJAIA{bzaDfjS=m~XOir#{(vL>xWM)ho4VRU;} z*V9>D?@I`OMUsJiFM#tKx4fkQy)N@~x@BAf)%$IK^wBLu0k z=>K3-a;9G-z0(DP$KW@w;s0=21~yzz-lU2jRUuX8W3D#T?)vMgMT%Ya;-aTH)G-4wn>8xe}$cWQ+Dfu8mW~G&eqaZWl zeMN?KdA+-GRM%JG+1pIsi7b4^{5fbT8*lQ&0Nf<3f&J;mpmZ)*JOu^bEn~59m*gTU 
[... base85-encoded GIT binary patch literal data (continuation of the preceding binary-file patch) omitted ...]

Date: Wed, 17 Apr 2024 21:12:52 -0400
Subject: [PATCH 306/329] update

---
 openai/webdesign.jpg | Bin 0 -> 50451 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 openai/webdesign.jpg

diff --git a/openai/webdesign.jpg b/openai/webdesign.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ecce702e5de79a97d561adb423ca70317d93157c
GIT binary patch
literal 50451
[... base85-encoded literal data for openai/webdesign.jpg omitted ...]

literal 0
HcmV?d00001

From f8347a346493029cad8c67e3678f881830516a88 Mon Sep 17 00:00:00 2001
From: Bob
Date: Thu, 25 Apr 2024 01:15:45 -0400
Subject: [PATCH 307/329] update

---
 openai/fight.mp4 | Bin 0 -> 616701 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 openai/fight.mp4

diff --git a/openai/fight.mp4 b/openai/fight.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..24d83be45a34c190f8357b263561bbae73076eb0
GIT binary patch
literal 616701
[... base85-encoded literal data for openai/fight.mp4 omitted ...]
z`+?5PuOI@_WZoGZV&=fakI@!XaAg=3$Fe84a2=;MqtfYb@_t5dtZz79NHf&+L#cnI z{?cyH9ruoFSl-U#(rGBy9u@;rgaQed@IwYTc~iI$HAPd7sF0#+i>I*yBzb_Yk%#K* zFMkSuKrlrAnqcgI`1d_L=GZj3X#fm0nmu56ZD9JbC{KlO@NCi zz%!XPGX+4|BM%P;GD)ldFR*CbS%45NMz}N))@3Ofd;bvB0ob=Hvp6~RO6>64*JEJ~ z7VGx!VN^96qRnQb_JU7f`#Nxr{{yVxhOMF;qnK^?d;9mgpU_yYivua(96>F|YcD4y z7BtOBZS>5%vicfn_7MeQwz5SrtX;qJYK9C!$c-3QbVIU*Ln0!~2L~LKei(s~ohHz9 zHb|O}c@o8ErW&Nd^NI}m;07)_!Ypxw`y|{>a|E{MM?O^k|Nckowte$J{4y~;`o{#V zOf&!uB%nP#LCF~XqMX}b$(yKp&Eb0$x<`Gv<~kae&o@x_RbO{rpa1{USF*#e5<0mE z4C|uI<8364sMLwMS6KGBZz~;5|2UN4%h|t*zyH%fBmuWp=2$4{)@XD7%VCi-*tGYw zM29A?{hTS0pD<7L_&d2`jOgEjwGPMUf2&OS4^lJk!Yg+Cv9?zzJEatDm~O_Dp*1j? zL!`M|GtbCL^ThW{LRjA-y4hulBlrrkN@6f?XZJ<`Gr3K zt;c#L59`6@RKMR@64&aS$g%u|q~(oD*I?51pvMTsW)sPrc)yzBLMpbmx7RvPn#~2*!2HG*6Zi+CcA$QPa4dNNrRMowMbSnzrBn z|Kg+4NDPo=#h9>Ym6#hQ2afSz0gI{)b5I>VCgwAO$ZMaHr0uu=O|#YtOvr1d2KgTR z=qk;n1}}wkhDMLy{eJC>_4~dx_rIQj(C7YX{u0X{^Yj1R{~o0kuDa{eC*4#1~$X$E)6Xt(Bd=bQpgjVJmpS6sPCr z$}}J}W8cE$5aMXd{ZOQB`$Gim?7FoyuhT~NM_idIcm8fOCEj?)UGRlUzdKrtV-W*X z`#A&f)STtwGFb190YJrf(c-2))O^EJF|Ys>rnZT*z4m;Vb%eGlnoK(N(8+jyZC%KZ zL>k%^0M~b!)tUtMqi8MqvAn1Zj2lTNg+Mwu>%aGrh4bIc-2(tml_J5n_>0|24CyD# z5hx52r%P>L)fFWz1`Y9j1_{5ZVj7bmk<S=`ox! z%&s=QEw*^&%vu9f8a^z>dg#l8W0(+k-oyyrSD*#0Wv};HBw??l004kB4|0all}#)F0XP6e zes~xFPAX1Ykx-1Zi?}vqGOgEPc8Gi!9I?k3OXRb7ngvDa+$hD84(xKF5r6&l0%|Iw zD|^FAFvbAuWVZdo$+}q>X^lNY6M1~ma`vC-=n!awW-d@>0Bo!(IqUqO6eBljufQN2 zId$ZV7efF*4V988V78N>bOeG%svg6nBbFM!3jj{I@C3&+RKN=l62rNgBaaL|ov-o0 z%QE7I3hvrp20@460NBegMku^}kMC(|kUc$x;hFKImyaM2uV&5a_wv#>4V^$lQR@Hj zW(*Lr-j#KnGA_Aj7^A2l2Tu{qVpc)+1iY@7Nd? zQtAoV>TYZUz{>zugg8tL!o8(m*Les(5dPT%nCwE_IdSQR+5$c2Mj|)o4MqABp^})Q zqt|KQ?-9Wiumc_EXaF)r-yJ|=-#PvEKHL24iaSP+DWk)TNiV{=WVv;WN3U-qS-1Gh z9oqJw_=f}R0~n$~IY1~zxvjJik|1Cq`j|55aXolW6~BZk-JF$qzZ%r8;z*U!&QJ_1 zt(LdnsqaMiEOZSpcsW5;9Vu@f4OiQmpO<|kF(}XRX=a0&!&5zWvi+1jzhK(|B4lG7 z*4tObmkNTu_tWBNiq^l)$^_0I?a0mR@cxb0LMYjuAsv~R>;qyf75HHyNuHU`K}$ht zmR6)=4{`}mvEKEs|1k7AApn+wN?)8La8o`$$|F)5KR8^>flYh{KDE9jNvHq&fJ{0K zKyVpe4e$31LZe{4-uLZohz*m5gNj%Nl7FntM;bogk1?=fB)|A@3X)Ro-u~P&t*H70Ph7H zfB%cRfjm0^O@`xhQ)NN@KMlk`Rs=3V9H{v4xp?$e8eFxT`hU<+Jt0BJDK@m3jK<(l zz2K0`4$iQOC#6?Qr=Dk#m>~_ErBv7vtBV4Jt2gw3q)U355TJ>(EZO^T#NLYi`SO+8 z^FIIr)CItQKnaouRKt+xpNiOQXW>lq-C7{;tIC=RHBO**#A*>TFn}pE0h8Gpdb8l+ zR6n?)62kAh>u})WhJXM_dVz9X&IBNsLKudu6W$tsU+};sj)P$O*b2bVB^ZvR?|;ky z0he}W0)h#AZ`)P>*esctGk^eqFzkC&M(Lgsac31^oM{jAx?78jyN|?)VW|VT(I1Ybk@Q><K9c8F zDO_}cEwIluJOK~?gw|rfebg&xo&UUEL(+mV7Z<}L8$EUa1f|aaSO2JBju4^=F9{4^ z_V^$~MlhYV+l{Lk5F@GanjnGyoLHedW{>eDVJGn$7{1=t>8hqpCN#vKeZz++J!|tz z;o!e5%*u0A@beIAa->$TANY;~)W6HSAEg>~kN?oJ;7qod%<;MuEZG!z=RM>*#4mZ;P>+rG7YlSLQ03i#{L-r zjC4eQ{N3a0yEEg!tG^fSxZC(^C)@C6BACuN=ZyCUjCXB_bsz`tv=t;h6!c}tGXUI} zLOkE;Ky8%rRn~pS1FQYp3p!|i_Lw!?1es}fili1P1-azM{*FMjj~%C)GEucOYd1c| z_@5)qSoAc6`peT337CI>F+K#_u3+0W z$=ErN9&n1lAcuh=HdZ1aNTA9MSsnlX2oh}u0U$(6a2Oackrym6M6|X){Q=0#7(S@4 zL1IJ+6fPD6D)5e;GO^HTMxb#q14+}*k!#z-m#_Q9-X0u)J2W_a2a*8D4?vqX9KbzB z*jUC);<9XDDs2Poa}D3m|H10P60>DVTeC$`!x*4-Zydc7rx6?}x8S1yA2TL^VW8DL z|41c_)0tYhLP39(jCT>c->VQ*!QcR}-tOTBec-SFK0KX=2p}Q64+Y)b-PLbA2NAdo z;0zipKmbsP;OvM0_~2QZJ1XkKOG=1cpAX*1nSkO9Hzd-dz+(lEw*b1b-~kBjDP@un z8k6(l7kB8MCNY185>6ff1wdq(Dg`860>G3xs1;d&k(OX2Cm}S*07}Fu2(bFtKnMUa zu$CgcBmfFYfB--=2nGNoDGGd5I!Xl0qw8~UZ!8a0pY-XEA`ZSNkLr&RLD~Jq2Z0$V z%7wkav5^j7MD41YxLu73vJ^x_(Us?v0768GuyhMB0W7c~&p1rtfZxE(fD7HmVI}26 zwhW*(6Oo3qlYS0U%i%XacwJ*qfNc?wKI-sBV468-lyPp*{HS1cUBHFXX)(Z|fD1X? 
zc^`0eaO780Wzro!3GgGp*KM9yH%IV5vl$k}J73#_vN9ZsmhTKMny>w62v9w&@tzX+ z&OCEO0HulWw*6us*kj>@*mKW&gDcZ%E(iJ&FJ6{h9N`U6I^|m285oT8U-hjyN#VQ= zA{W7HiTnQrr9Q!Lg#f2eG+d(lt^R*EpG~102-M_izqkH!L2HB+Jz<1+aUGRJzM@WY zW2yG zRt=8c{rn+%|Msg|859C%1G`f$r$;|`|1;l+X;@Id8s~GqVK7iNC&yAid6gT3n_N+N zgLgBm+^PM>M#9Iq!f%nSe|e=xw=^f)OwlAL4#d z-#+;-P02r3YDGPo0WS9VQ z(1}b zQNV28%QFD$nsdE6ps( zir_+X{;Uk+SPZ_~y%(?q5J(ivX`Iw2t=0_~4i4*Ww|H5$BYsUMtc*dW(u_V&kn{(%+k zA^kdd$`1q@HM?Kn(_V~>C(rDO>>xx)0 z70;Xp5k?N%{xJ)Mp6f%ejTyZgN9`;@#>fqUNL$H#Na9f_6AJ!-WeYIQBb_`Q`i#Od zQujk*U>SY95(d>qljKrRD?y1Ff5az9h#MPPo?`TiN&p4`{WFZu3>-a%_Lvk?@Gwjp z4IS8n0ec^5g)uAT{i#+!Dk?d4@5lCLgSOK)Q2+u8<5B7Td4uOC*8+UCOZKe`SLjlYj^!W1HhIN@2qC`9hprk=_wZZ-+2Gz^4*LL`$z$ps;(iE`xYGY@ zIS!34jT}8ya{7zr;61xba0$9MUzeaNKhvlV5%Pbk`EVYR?)flve-9LtKgS!)gB*qF z1dc0}(_0Pedhf$_p+gIH8k&`?{V*2rxeen&Vq3G^E?}9RK?i|vyeR>w47qo|qt!fG z5IVDeLlV<)n^vFykU!IV;z6)|0mTW&gpcKiwFFl}|KMd+h~zErFu)b%DjZG(tx56r zKqwXo_x1%G+ANTjKg>(axC@(r-b^?O6DnQNKFeO8;tOE#YY85Ti{Id=Dk!F@AOK|N zGGzYxU}FH0Rt-OPzv*`7#R;$k8Vk`u24%tNz;CMSi1GkMH$*o<|AwGe>p!x-fEon2 zFJJHeZKW`vwO!-(*wUGabzX>x1Sx#c zxyrzB`@y>3MO1%9FB!JrKg!9^9RZ4zkV$avl`l8(_2?l~0=s z|5JTJT38_E=qo%#@VYQ#l?FaguV>(tuw$1^bocEygjKBAIus&cD*{?(`$&atow$G| zvq9=(?zbN|I}S@KTK~h9wskq|DDU1;{sTRQ zT`8(mZSlf8Rt<&wQmqh)lFgBfI7d$_*a63iBX|4%cl<#N90I;Pt9!1Y?*ty=2x3F6 z9cxGPLTn@R(LsL2E502tnr!7T$R>CV^MFPH%euyQ`Zipaq6i(+N4H{PRinYAS#%x_ zti9mEFTZ{in|@oM^YTONvr*tU&X%arP>uox3-CU#Cc&1hma(D4k&2N`&d1*+k4pGI znpb$z2`K~Pf_!L(LFNW%?w@(79n?)0@xaeuLf%tqZy?qW`0)XH`~0zo7-?Z1fxn>o zJ!yyDusB^Z9EI?5S%ZZ*0kIV?6$-D#{%XqYV%(n~h~?+twhjEe3uc&SeuK~pkB%hY z^48G>7}tyxB=mRv!3oAlrG?%HJ6*xU?@bM;tO%Ylgfq;KEiZ>Hhh;xtq6Ri9n=}` z<*VQfHb^r~Zyx;EAQp)BJHl0HhC&sc5u%`0Su*ddq9rNm6WTIv;;UaI7*XN2Q};2X zV_eJ|Yu4YtR4i!`oUW0_QBZ=57A@ln9{@o>zQ52X-Ru$v^R3tW1=(6Q=`i1*JOGPx z7k2$ACoQ4$FBKH1;tBw(@Av2Z-Yb{eZKo&pJKU`4JV*0#P&(A`eam;3xj3u~hQReT zB4Q2=wK6O@c4jUdKd{7})9R%B8rP&P@2b%I$vn1=`(|Fwz^Gn8OS}3{bPHs{F9!cAeRmW@H*87=nS) z{!jNvmB~>2c{6x=`V8;iS9oF(=j+d}%tdH`qetPQ7=hg$D&@CKlG+g|hY4gl2vuNQ zu_y^$c8>5oZ<>EG{u96J+o?%2v4-$Dv=Du)VQ}+@@@B;$@=U$(M9Gx9pW0@QJ~)Ta zx1Ip7^dzkpiga7IBM{Fxf&~KjeD49e?g672gfDoq9E?Od2MHL#Se*drx0sifiRmZ9os-+}6A zzLxlexDF8VBGCbXoR9+n(3h8L7wGRRZs~$hR!4v{a1mexuK=Nqriw@jP$=gN2&kuC zJ7TH?VgLQ!R3GwVY+LWSMk@)}8v%=hPxJfTaFfZH0C{x>-|_b*^I9M_c)}HA+v9=9 zkX8m06{gTU4?&M|tQuS$fAC@2gJ_HSMhn2KF9b-FU;@FiB0RQB<$OEsEY76u$_jl+~y@%diqA?!)Vg83shU^o!Z`-Hw z+mObo;L(Tf?S;RMtwB&m2l8i*8o6#L-etu_S$=U(F$NT{V?Le;03Ki*pN^Id)+8TJ zuMuM)SbD*9#DU{~>MXq5ezP(A^U+*t@}Wpuj$Pj~yB078<`do8cD*Bh3U(6u=)Xg)(V15RCo3SMh}Vt2Xh8aETaNogGctu{i(e)ICn&v zMf|(C8vsw2g@Jh3{dE?*`!TFUun@X>bR-$hI0EXs-fRt-Y1G#dFmXgb>GM!Ro!Aqy z^)_pI`hDm~tMAP!;sO-tDTHrd*0J$@hqCkjFQa_82GW-3BL`aagz#}Q3`YHj?CT%_ES0r0!0RtB;m+3|TAC4EY!9e7797EhCL50RjJ)f7gEYruui$~5hmoI)kHVs*+ zg2Hiegj!p$S?EO;)P6}eQm{1xm$j*uCS@nRkA*$b-v4*o;STrx z+>6O=g2*YeA}Ex=CWJ+>#h_S;iFAj_0lC;FYq3HWx4U6vNR}hu_2lNjpM_WaP*Rt` z`L)r)flv^<8UNDRWOe|YJ)gfCz;~+Lvk@Qxkd}lscW+K@krP#694))v^qxF(ExEEG z5}%B{i+6Ac?K&r4sya)FpE*e~Nrbv5m9AtXDcVSSN_a1mZdHDYE(>)+7owu%<5OPR zNP?&@L}4#R9X&7KZ2M6nhxf^sm&@g+3LvL;>}7wu&@3EGi>m{ZvjDdN&A-K~1O4d; z6>F0JAh;gQm%0L*1hr4K4Ss*N1qzSvA$Sel@I!;wf(Hh!AIXtXLzO0woq+-kGeM{c zqm3KaCpxDM-nd9GkXuic@y@@;L^Kqd(fskx>_M4+*D45C9{}NI9GYg86lUJkb`)!rOiWpb(7t)`tI( zX?oW7Ui|o|MvwB09%F)!U`}a6GJ;1Ox0$6UK!HG*0Z5@;Rk0PuUO;Y%WE2K?J!Ocx zEUX@$qk-iNEB)p0T!tW91?)QE1Cb4F=#qs?A_7ZLpFos{#)k)Q=(G#iLJKr^ckbco zEAHeBV(5gzfqT>;?b@I=@R0sIOi=W!;V&(k=VtgKdVghY$@6u`3Zn&AKm@z(cbQRo zu;P^$ZKXdNCM^Z>-Y}w$k!4{KC>U*)K*#}=!w>t*eNi`*FvVhHmEFe8OijXpxB*8T z#=bA|aC$41>;Hq_OFlRpDy@tX$}YBIV75qb{tYqZqBEg}<*{D_ti3d$BX&R>1Khj3 
zzq?6;49iaQ=~5s-ycim6U$AdXivhpFU+OF;>ee_5Xgy~&=78J6o?3ZGE%ZFF2%I`) zU^GiIr)>x$Ph)3e1RsRbq_+lnwLEn#+uiq*E)Jb)cQ<)IA8=QOT_<3Hfa<=>7F+y3 zcQ!&EUsA=z$|NPjdoV0?EHdrtLCw zUr|M;l9ww@G6*0<41>Y-kfBg29v`Y9SlbjIeK_m6Vfio{)yyBtKlA{nO0jncwNO%d#>6w!0ySY>S0KTL0rN14>CLs5S zibS*K{$7Ccacr#65XKn?)~6qGN4@S+4QC}Tm0NY~xVz%w2J_%Qu}Qw#MptdZ zT}NLE87GigiqQ%eUit^^{9yiZP)w7FKyVw0cLA6nYswuKb21E_7OWP+kuOQrbKzEb z|A)XUp9BxD{~ya=Ew<1ggpPN=0LM15+f5@4QO077twqIUtWL5m*56zJk*^EhwWSCzu&}Af6!9CDfo&( z0-3+J-{LQ0*OlDgGTd;@AojnVg<5WFT!r%SC$PLAU{~LWW9Q|g*u*S?XU2xy84Ryo zZvF08vGBxe9XA;NK);8P12Ia|kPGvtYR~*8U4sF`NM3qK=_rAxE)S)AU@1n>r5G}> z93sewgz5#rV8E0Ed4!q}fr6WR=lNn$Ad&|`d3E|pMhFK}%FqAMhi!(k5#`O1i9>?> z%#;JtAOJ$gCF9Uo1YHOCP~^{~sGi@1x-QQVitW5&qEje^F!+IuTUcCQM5a+Gg~cq3 zF^&-p;0;=&hfoL`OECT_y~2w3uY=b8=cc?*h^NqB0FZ6*&h7yBOZax*=2fryaj@VS zdfP)^`~Cyp$Qm-L5mThU*=7Gv9*@)(J_l{S{uO(XI{jES4J>~DRl_8Lyd~~=iB3XaRG~p&jIM@Wl$4EW8~dj6N8%w1{boJ(wA9%gXBaMZ9AFur*5F=Xx@_#x7+v2?^2kp^X>X-Qrl{ll6!|t zdDmjt9AqAO*k9i+mPd&$(9)E85aCFF_Lgvw=(~?C!T1-hR>0sbWZnVQp~+^u|KUJZ z{cE}6k@EU!^7hxh81P@vOY!KBd-KBchJo8|hXGT~d>{AovvvCN>*3gXAB+?p+wUx* zTV4lQ`}j%4xt9BLu<8z=ktFFn7=*|U?gW{~$Nd%J1nAFJ@e&9D;Zoudi_>af1g+l? zzi-n9*K)o#3E|t}GJqnKlB3_AW#I$R>YS%}QBoep$$W6Q%mV=fq#b_UTR;HC+4qF^ixi+{|SmfV6Qe@f&W&x?Cj{J$;1$wX48_=3B~ zX2txgtog8zKDQEB&R4p;QgOx)M9C>+Opu6Z=pVJ9*fZ1vea_G{dz9_{Q~y{m2!$*d zhozoviU15y0DTNWzFaN^5(#g&g(|8EjxWk?Q-w*^bZ!jA&Xt4;9>AOVxa<3&usGQ0 zz}P9d`Bw2{8x`m%D9atRf&07OK?3*M|3cvog~ZG{0}_wM^1}oCciVEmrx0GkJ;0e2 z4j>$vy3fD&d3yg1P2k@ zK?sFn?kv0C+SbR85fYGNqG!;kRA`usbHspXstBfg@bpf9dHn3*$VUhv10(@}Yhlu% z$loHIITPdv)Ar8^Ne`s|`V9nise#nU?!Mj2uS|j4Ouc<9zur7yGzZJ{LL#A$wZivc zbUw5&kzaoR01UAq;s^}lFl#V~{uutDkLnoIEs_|2RT~X$f9@F6J(K-bBa#Li^2#Oz95PNZE=s3?uW6M3Lw~nMAh> zr-avhJS?6AjM9Db-j+PY^_G(wL*)=qps{GA1P1j1W>4o}V@Xg3LDqYPdI2;PmZ|qS zav=}C8>h@)`{^8>YhUTgyo;(#|9=RP0R4s7ZhvklP_pTb6?+Wu*Y_r#0YL-?^rcI3 zDAOZP5eD|S?nE8P=OV%8L(Pw}c^!k337!*(5t7Myh_HCX$+Ud6kYTdZ(US?87SOW& zX^o8Z4?xg14TH`~gNRXBRT$v_y}_NK@>Iz4K(3Jf*B5IqXAme`HUUTgLjwEY44vRk z4Bg!Tbp~N!&locluC`?6zL&3ufD$B#SW!mefW4=yf+6`Upa-j^&-nbvFAO6oY33sf z>^)*x9KQRCChb6Xq+Jzg5eproMaIX70B~v?nh$Qbee-+|_&dMp#%;mDjZTGw4*D;{ zJgK$TC`n|8Mg>q&l6cXjb8r=^ix;>zPGn4Sd|CW;~XSL;L)C_yM`U%c0Z(Vl>O9c1Lw%_0Khe}L**zZ#DHs4 z&_z*Pz)yw5h+hKD^)gkhv)PPZj{rj;FhN~aYX_ns4x>QJTwfKnOQ5+9_b$8C{`eM#>U9Z1DJm*16a#)hJ#1 zmr0#@#%pl!ck@iZYveQIoAy|a+DOn3O!Kv6UlHtaIXt1fb}B6Tv13T#L1J&$|etde?ZVZ1JL<< z<3GxlV~-Y(O|&r+G8B$TO))4ssL}Y*hQ1bZ-zHsu8nG>V##hQOWY|M*G|@5g=D5xn`IH(-J^WWU#L4&00}#>T(U?}1b;tWAgoyfSU1b#dQLXm>+Z zU%tT{!rz5#W%8ojvS`eJ*Ob>M&6lGMAWq>o2qSX9VL%&@uc!-yEM&Z$~*J z{lX=rpsFlnX5wM?vz;#AG-HzME!g`(0h953TV_Ndk?kS6MXq%&>Dyir8!w$uD)Pm1 zCPXTqi890LD`b0;wzk7Gq;o2QK<8g>(uE59}@z zpzE3q)pLv?q4FpI=2hWb*8s~U%C6!^kb(jSiZD|YZYy2`Y-X@1X&cxrqTs-=yMVLF z=zn~`hH(5gVY#pq<1fyxFG+uXNUXkJPnV?Jihy+h-WLYjD@7fMaUihD!ULQ9WAqh) z*|pX*bbR0Qar70u2LZNC4ynIygN*dnoK5g-uLtoF&0!O5Rja1fxwCG z%;y}TVebSWH^S8Qax%H30$$l`&@tLAh9s3D0gbG8%?MkM9YF=fc#mAYb}nK77YGPJ zfVmC=RjYx!t}==UaiS1GMyws^2=yXn*QeXaJSD&Of9;{(fdn087Ep|iawdaZjmKc~M@j|Jq|oDwX)pRbrWv^4^Lo7MTsK*3F5D21Z@jzR zw-Q1GEoE9`qX<|`p338nwZyM4l)yp?xdZ}V(DlrC&BhVOYs_iN2}$1*c6}??gLOPWMYF85>ZU4 z+8F77EyUFX>`aW+k2Q=PsDI0=@wb8ykQtP!Plj0?0c} zI5fka;L%zCWD$&kgE1q}iY~)ooy&hX0}dlRGWzaUxi|uD>6u>=2t`C-fkf3#%gY9; zj{pJziY+z%Uo!@vof#B8=w~amQaPuwDJYDm&&w3#I#)|Fe(U3p@J$mKjpmtx6zHq>c8Ou+>uW6 z83Z9ijYo3Mfp zztAb|$0}`ge`1A|yi7~PAvzGj`G^nQG-otj<*bfI3Or6*w_?h&h;iok&UOHJxUbpd z@ejAOAJ5C#%K^s*|2YCK`~88TQCc{GUR!W5;i;6Y`Fuh*!iB$-d{GBI z^vX+fE}=LOSj5;L3?!VsSrkKlBu_y0z9iP?XKQ-_#0j?RTB0^pNhViKi0B}3wJU$i zRpqkZo*`NE-+icdG2cNNo`J!QnIJ8HQ+raqz7>tWI7TEeCwmXS-XP6$(xau2EG!7! 
zOE-A-Q{)SK>*Y&dPr-eGuxz~zY5%4d&Kz<(KOB9h}wotJapQ(r@dvp~n%7x<(>`4ihJ{)=az z>aiB1U+8*}mxLaRmpbt5Uzay7dZ{KUG*?sQZ@YJ@3M+nk0c*0H*8;C(A_g>Tdz3`1 zv+>#An%>vUx?1Jy`wz@2d*#0klq3W4hQSgI3r$4WPOTT!aA}mioS>#p==~SCUEI$? z&{`B0`|kY2ueMvYBxG>&4-1b9E#P*Onta8CD}sBsgXpn|-^Z_JSHlUQ-y4%faN zh@=Jx1(@aCinP>i=dS$7M37pKcV|fhPSp7#hH7+_yS-)Oz*``J zb_g$q;kjN_!w8DY;Km<^V2WoodJn_CG-!ly04gE?0b@RL_iU?*Fhb~!6r5kLzkFul z+jr+Kyi{Nn0XP)_DRw{uoXp*suRdPGoQ*Xpvt`6-N|!+CoB{Z6cy_~_;77hx0xx$L!!?^^%3@!#um2ku<3L?vS6?%2XUbinq z=YVU0Ah;*!P#CmFtak0aAwfX{MN6oo@<-WzUS&{SUZ{280002>A>#}LIQ~H2KsU?d z3MZGf^rmQWvKkeyXCdt`W&IJ^-+9y z-h$aNKO4d*xSsv4ztE)#w_D~P(#K2E4H3WfP}lpvTt~e;AopXhlCpoGvWCCVT|-~v zMxBuK@))Dfm59Xik@eH~X}P~mRyr8|2c=PbaLeadU|KNJ>}_67ENcS#bJkDwS=PCB zc+T{(f8yj}rR=!L!}vthUmN^AAHg9n5PWfdWKKlCsBv^kf&zKG zAKVm8;VyEI`l#HNvdxz)EU3QTA!pZwyF8iy^-}i_bic%JMzXZGs z3|h4Y>=x~I5g&0y`rw(LRov#%81PP@@D)}r9rcvV3%gpv{Jo>w}6-e3A_Ov z^HH`X+sB9&IE*GF0W5-HdxOS7!cI&Ng?|;DKf3@%JPs5XL0vqCoN;&i;D3z!nRTxK zO900Of+_#7+{vXR{`^BWFOoJsQ93V`SYL~0m4Oh0u(mO!C77Ab=(>uFjD$=qKtqU7 zc+kpRSK5wk7&;Hq^C99k4TH`~O!z&7Nxg&Q z!e@q%fgJ!&mpYrRKnb($*VN`R9 zAo0ME5&;@uqEUER7>X%?+t1K*;KQOnB@MTem=jrv%omrAT~qWo4N$O%G(T{wnNv#y zpe?M(C{ZwG0t!*0vEjN^!oU~eInI3%lNcI}J{@w}0%wwzhtetcQe*`=a(fQjyhx57 zBV6^uNLPmuCB8q87nZH8UVT~cleH!dY`A%L@+`{D9*`%WLirk@MgpmoI&)(P2l@|% z4esmsD#ZuVrr0!%Ff<@iaUxTd40g(;S{?US5hCk~aNhjT=5d`=rhyFGWX&tXUzisn z#Te!Tk4L8jvX%>=x1myLvF$t?lJYN?nJ_cK6#?aqb9mM&K8(Mr>xvs9 zZ2S3LyR5|f5^$e7StDnGy~~%hNAD^`CocCT5hzAkGLC*XAZD4|yHUDQ-jxg#(jZAt z>!k9%1W;U{U}X{qM`I8T={S61@+h?OO3&l@YsDXc#E2EIvzNeAKuSVrIcLdy4k8)% z&>PSjfsluPZz_5q9T|ukFNlG?3KTv8i>_yqbNQS`;`>PY6+#9_c8^05oZ>B{^7OF0 zT;|L?KO*8qB-6o-7&v{4ml?c+6IsYPV)5TFbIL(D#}OBecn#U|Lsax*t#VsLw_Z^1 zL3l(`5DnhmApo0iaP>^27!8Dpat{pkX@=2QrL0(lg$x>_hCi|JqM}}9dj$qF$|y8n z%A)FDN~s*j5RwvB#GGg=6P*KFX=&B@qe`Nk@!Y z0ePry9SBP~jM`HV5-B1F!Y-NiC|s%x|ISw19uB~BfFoh$7YQO1u7;cRlav;^uPbTK zm>6@zRN0^&m|(n_b8Az_$xtgb(MBnc#yF0wKLQ2O*$Rh4sav(Ql+u2>bWTEV2Ky?BUe)#O1 zF_-`8epy@d2hkCL+EHX4BQQvB4+mXzfPN}TNbIN2XCDUv++9?z(_&y0z-%C@08nr; z27-f54(v+6Xg}+=`4$k+52Z)aVT21( zEgVR%PYtQL2kUPz6=+}3Vxaa6R2(V)`dUaRjM?J&I}!(CSRGqY*p#mMWQOSwW;ntaK=Ga>5O-NePB#UZo8kwi*B(}$9x7<1 zIMFkqu+RaWT7zFY`iTia<`(l(dDr-bq*Z&+<{dU}=59MVF#C>0APnI^tZS1~$t7a_6RaOdc#@L=%>n7w< zUvB>4gdnuaO7fB^E>!NUzbRCGn>?tohg;cdloBOk9Irw~iWw6@F()X)FyBPo_qpEz z-n6Ugs&b`>C?OV(Fkto&7%-(!*&|E?xrq)>CMOwvFokg>tG!R-s*!h1d0!AiG({3H zW3^sinnky_rfkY*Yix2bn6JI9)--6vm%qXu*< z4~!;I|1XvB;DjB~8Ot9MC;S_j-3?#ijT`^?M?=rcZdh5OQwJ3Q;s*?w^n4gLKbqDn zislmYcq+gNhA!zb0U8ddK^?fun_BzjTZtc)tV&`TM5D@4n6JK(f8V|3`56)#C>DEN zEV|UQJ9$Wwt}U<2CdQ*sCTp4{dMa2k!eJwYhoX0Z+$AD=a7YvHV z&dqU^;elCH<)#V%0V%x*fB_YWCyadhq*B|U$)y@W16vLPmOCNE+xz@*{gcl zn}}Z4;%X<-p{_WS`-j6ZTGw+Ms~;C&*gnCqVSlOECqVWtQY9nGM5KK2c}U|-l`^Zr zEMJz<;D3AZ0}Q`!G5z85g1k+Ln+Q(xaYiW17@+Hpp-YVM8ZMp~MyVJ9X#oBR0^oov zD956014qCUa!?eSARDR8VI#a*00Lk{0gzoBC4KswAZ6qS%*9YA|8!v=Izl~!``^r~ z6kpO8?9{tTG6}R{#=sr@q~oExIav9P?*L$6AK+7~YEQx2T8NZM@sVS2W3oO+4lXF{ zXEa|2^eLX4zPKB}5eQN*hHQY=wLI7mG4T(6;AKYHY9e7r-dNF~X983ASkP%@$N3d)@H zC=ZSJL?N3V4)kjxaH4Sr>)q>_9AP+_ZWvdeZp3SAC{ z2}AH7@WVCk%UO;IycD&}sI=n=Q^X!F#2+NvKK2aD*rV^4b0-Xx!2>ZR3F6*LcK7?U z1b**^npGIPJlwS2;kMyL>CY+$~oY>H&ghgjcHmVkd# ze4ilt^b$iD#=;06&&Lui2Bf=$JCZ84x*Pb)%I+B+AZbvLmDoU4g9^GpEm<|K1iTGN z1K@qMklIzw|VjJa=!k!DJvC< zkyk1dh)cFBW(#4YID=4YQOx-ih**1j89wgSE>@H(fn?kW5Fv~QEFe(PEL9#p`7`P* z^5=rY5Z@j3K2ixuQOn?qgSIvKtli5pGhEP;MWwC7GK@p!ucdaIL z#T;GG*M+FF8RG{0wQ*(8w!?}AV#05oUBC3RCJmgvcxlmhOf}8F{p%-R=yf=7ChTZA(N3&Jd7$826Urj0#w$Z4YixwE@_Ar~M@_W^sH ztv%G2PmD(xD^z2?P4(9W(pH`Ql$*3UaZ%o5|3~k$_w>v$2lTjt`w$gN>+HSp54Jf* zOr~`m^5LGai%j#+bjp|?v+c1_sq9FqU-`y>x{lBy7 
zB4WvUe)NkdX*@Iq6^Fp3#J+fA$BG)lNNA`G#{-g8qeqG+(Ue^loHBK~HT?xC`5gnE zGV5C~U?&nJSTVix&;k7NJvf3Fa%{+uL;EB)pc}UbO*z0DLWIyO@i=%Quk?VB9wu7Q zx07rq&G8HeDoI5#D$){B6pA^)yxj)v$^Q>tS#FTmi&ZYA!=FV+vUjNQi3Xh{4q?Ep zxIUYRN(17*QGHE$Kz!K6dSPd?E|n2)pDv>_CU31zf=tAy=@=@|$J zk2cVC23cWHVBX32`Ov)xT`invu;Bs?i}J`rc( z(jt{oVr-0Zy%t;ulO#~;#mseny@gbNmZ(xQps4m-F#=bo{dYC+4_Dp56cete3`6~W zzY+iIWhrKO1_hjtBKH)#bADTFl3Z8C$E|^{At*o=80r_xrAa9ml84~5M_||1S7;H1 ziedoGu$;sN07(U7Kz)A!`>3fh@4{^iZ_@A&g`m8DbX5RqFv-~4I8gZUq8(&Q96J(d z@Bkp?2$}x95#W@NXkLI8W^>I%N_Hyr<*`6knHe~2w+S3bzQsf8!hES(Fwl}Ej0uPd zu$ZA;K(_y=9||x1!5h=T)H#h{kX_4`&e)6(wvPdrkAeguUidNDRq*6QKuHTcS&lW# zz6B8{W=24luw|hl(0Vgv#TJ_^eb&M>-a%m=S|yxj=w zjueB>3d%o|#74@vX0l$!#@saxBRKej=&sR824JW>;D8cBCl(ZfWsSdh2H@p;Bad3; z{+pSU+Hbl$!zd^1?^GDLTu$PFFhwQ8Lds1p5*J83MKR!O{pEZMp<;rxQ32r$Mm0d- z%_A)*Ltp)Heg0aaT4xk2!dexBvSoK%H9*W+>2=UGz{#+P6-NP(D5ws^W%Ik)TNR7D zzE<1-kP)Fk0zV;;-X4NM@2CMDqfF)fsA~5wL#cUIOl4G<(QdFs1cCttGKB>~N19Z5 zn1YI^GZb>P*m?|tLBnM!#)(B^HnT9$40^>&0vk^Ix^i8FWJoggfd*|v34^*n+o@8* z#%w9IvBH2R{|f$yi}}6(zC*e-!e1T7kH}R>RH0b3C?u@%^MXfOo2tB@OQUGk-{~RM1lAn}NL`P~m zCq9n$ZFCF_AD_G$u2yY*B9kCvgMo8eBRfJ zS}sE+Ii;6t@K7r(Ti^$oG~nP2K>Mro;P7HarC4VA)V80omKYQfIRylK1GC`Y$0**T zKOnG4BURKs-gppNa(vQ64Mp{b^Ck&b7r4WF1MmN&0moCX+{r;k*D-o_u|nR0XOBws zM{CdAF(rV#+Csg1S3z?N;Npn@Ztj0saA5p{D_DdV;lv$aUx=FP%QXy*lDJ|IsVhx_ zU(gZq1z0a*xMF$1!?&>GO#tImu6qVCu>!sE#!DGswk*^Ske8%5M$Mkgq?k+a${)%| zq_hnKrTYuzB}HxV!NGu$0#7-DjwE`EF=huukh4u53evX95KQWpe2e_5Ovn;X%1eDhvr;2Wj)qfzkG;<4nJc7FnG2&uo8?muPW9H!RQ$3lulf_&Y)JyN9go z?IL3mVTabxdd2o(lOK(B9}s{j46_ar|Cg`%c?eHO(4yu`8NH=j5+n^p%DJb2o`z=Z zI|I~5w~O#ITModB^4*6>26H4FEx3rH3>wAx$SYZ9F`+bq6C{R@@i-TEFXfk;i02np1XiEFZ^TKy$EabyS_hTMbsJBQj{ z5;oKY0lO~Ad|Mjt2rg1j_n6y}n_u=y+u@KWvLhLg6Ag_df5L4;u11OZoZ|i)Fpg@; zF!JX&EFqYu522NOK*0wVrB2|aA}n|&3m}-_&?ykc#)c_Boj4G`5Bo8VJI~{lsEn+v zORYlcpsyE0B(AtIkTvq_%(=&i^Ee2Ws z4xOe$m*zXM@@;$HaG1WfT)T)dM-M4XqD>5!!HN+cR^Av83MMj`!iI|&j{^EE#d)U6 zCz`U^w;HP!2mwfupHw8aS8{$dkL*m)8;(5NW&|yT43neay@>43A>{iObqvdX){TrT z8i|y*>BR>UJXbya@hQBw>2HFO0qSP&gE=Y&_qOo*5G8v>KFs&x$}5I|8-a?;W#F8; zW3rich6r%jd_fM_v5TOk{R!gZq2W2oWgN+4Us3EB?RSI*+O&CXDPLJrOv6}!sLe6* zN;G1v(|eycjWkWyxn2(c#FNI0@EOq>VyB9jsqr7gS=F=*SH0nwjZb%b;b@V~*!{V@ z0tpNX!WW9<>7A9de{bZ6bUj`3a{5Dyh#F`IEaa92@pkn`{CJtAAmnLNE0(+K{o+gs z1m*AB!ec^qQYn4%_3{m4hzhjvUJl<$Z&lZCuKRJaMZ@p>T&|nh9z@7;1PYvLx{>(7 z=*RlAFbwn0{rZ~?a~fDx{#%|^jp6{=F82ArWTw3-Q~hLWf>uvqdJTjBW*Blz=;lB= zGekd3Z9(1~WlPH&;3-Qzl@Q27LuvBEi<&9ekXI}QDTxHf4L5;d`598~=U!B75pF?( zRQdGTSh&2BgV`bZBN7J`Nn2I^oBV&EthfE0q#Z)+8wWpu!rCbZH$?vzW0@i8u~l^4qR88MRxGCEmE( zcR<`wDhQGjvHwRa-`92#_Ab_9MwGqi`&d9^5kJ6C$iwaFba!PNur14bk(6JKvh||u zWyh+I@kgIIFZEh95bY-|-d@>kXOMn8DOWoMUX6_GG12&&9ldt(3-bu(W9ThEM!=EQ zzYTts2$A*h_WBj*au-^OQWU+`cOKmS>zP# zv=95%h|0OuR^k{q-7GldxT5U+4`LHyrUvXS3?v~W1{WVePDR$9&SCyux(P#)Hi5~| z98Xe;if94BN#%pK(a1R(q_n<5nW9KA+%ECZIesoXe?W#jBf}u|s9<|R5o2y8+=}v_ z5`1j^;rAU7ep+vDcRO}yfgS!0=MY%JS?L3ZPy9&1V(2UUVfP#^b`cg2I_6c-x(=9g zKDeRuE`#R&97ngk-Too!*TS?_RWv4Oty?d`$Wq|f5_Uo?9hM-W14|_Avjs86gbkP2 zT|*+qs_JH+>+JuXEm!^_oLi;0c zxR5&%p%%(;0KG9&k=Y-D5ionuSZ=e+&sF8W-nu%-u(*9U4>srftiJG`+KnHrok-mS; z(A(CHUwkBABp^L+X}G~&T70zNcj>fAgY3L>9%~mVCUm+yh;XzD9%DZ0&Nv3DrtkaD zngjel3E(Z*jS($>Xl;NF5;<~0V!4UH6amVNQA|1LR^*g@>Q`y1j1rb9n+ZOP;&nIgRd9w1K+?y5 z({vLP-=4DTcH>qP&4a5=;xDP&-4j7EQ2g19Jv3(A)kV)U& zl))}N1F`9p76j~D;9Y&RVRA@I=ZKt;JCecNf^mc6Vj-4j2l|`781r`gM0X+%ITCl) zJC)vE_1rWlC+IwhX-pxE>)JcHlLp!U2WAZOSY zhR{-S3iCp+^%y~$uH=T8r6y;GBJuZ(F+1J_U_uDiJr^--3ZSD67rtG;rsBO@;Qd^+ zQP73nLyGnl69fOp?PH(Ym*7Mh5-|-0Tjw}|=22q37fcyJr3XRKZ3SZnXfJ#`<9@km 
zAaE!tDM-kXk;ND@sS3VK_JDA4EDoL!gqHiyNqi*J%pYKraF4oUw!V+=JwG9y4pz^lZP=%gn)e4Ly{X1VJF)Xixy=DX(q`=Mj zaczX{GiVYu#!x5*=2U<4c!JPy+*&Rpv>edA0p$n-)GT2*$?Nq|M0jG z(DyXX%+on<7)dB;aRDd|1LVYzK7r#2yaYK!2<>fj_#y7zi{dI!@lb;54bYnjd2Z36 zRCB3C1%@=!lYD;oNrsx`)a}Weq@#7B!jclJ!y@vnxUcv?&3Xnte|`FUpbD@BC=-^9 z`aN}8gSdG^!o`t%b&(Y=UpFkWE%N_1IHo$!K1Ne7y&IR8!1Pc$K_nuE1^CM>=@4mN zXXMYo^$^eq!_>y-!!=sca>CdCEeHu$?*8<>bUqgWcoCCJg0kB6UualNaiAU1OBj-X z%<}v@CW2$ZQ#+WjtvpyuK=cg*@XHrbriKoKm0;J!*fQY=J~`cpU)n z5cjM)xg^pYDE%vWj&vlF9Hk>9Y?7OgOY#dN5t~~JIdZn}M^@rejf~wGidr531$4h%uvfX zKwEdGrOoAhCUGvoG7umZ*>H6{HZmNM++w`mHe>DL8}fX-)T(=Mus#!&crlYSN@wi^ z)P>u^z!#0Mh%qQP9vVaIR*zOKa`M+9g)%uvLz~CTSWMOu2@KHl2ia=6P&Y3c~#=2G)^o?(2{z3u$nNdSN($B7*?WD$8)lDV^{@9@v&7C?Mx% zU#UN0(=R@DNMr+~C_=2XUzI!q7y|$U9s>QKLrpOOu+uCdxB2caAz;TWxw;D~SS^a3 zC{hxLfIrUXqz;xjIRn)9Jp!kvsi|x~E9Y1Zuox!dAnhulH;f5Stp|z;G8{_-1)>*W z>xx=h(iDMP@Jj>jt89TDXdGD}J8Xw??lD$eHI|z=3-}C?fEJ+uFDJ1?3J3@k8~B); zPcTw5N@ut70F0@;J#zl>Pymu8b=PC3_Uh7g@OI?Sj0e`XJa?qY)^86GAdG>SNdyyu zorn}shWdpbuCTyZC)HX6Uy=4)zbgo~V4md?UzCV)FI-SwUkQ8v3xpeTw)<_?H^W#5|Gr-jHM`>31_8n1=tUrL*pqVCDJ1 zHX`ZuyC1dB7+#M?TV`r>+Do&;aDe?D0^;l8I6zN#?t1d+;n`s}C5I5WY$he021^5m znL^Y%RGDMG%Kp#-x(HwetOvsb6W-jM@`EdxD4iOM`pGBf36Ss9!9BPkV))ZQ_>Mve zGG6>=`h+B9ux6$nN|t2opO3#(UDRF0-%Sd87ID5f_=TyLYn$LRaUZU|4j3buk#Tay zE18;Qv0J`qu-+M9c@ejRZIQDX7t4Hw^6%t>1PYM8GAe|7i=_#@ETF0Qc^DGPQm@?{M@?PX;X>l zaOGq?c*M1ud0XQGFMPxL7(<$_%;BVrsCs_SL>X!Tc0h^0 zg1v%3`w0XWj8Ec5auRhu{9XG$U3D00d%xEb8VLd3CAJ&|FZGKPG5Xd%4{YJ($(m_@0{gkj?#fb%jcHfA$C+Bw$JJ3Cn73e%2*vq)e1k02#6BA=sv3(U~ zRMruB#EtPZkg?2h%wDN%3=W%+P3*>8A4BBTA`Xx>(AadLGsJTrU)B)iU8Y3BY|$b> zJ6unvX4?L^V1pP>*t~+F4YeO=UeTYWSl_tjUR_Tj#q3~}jlr*U?POAM&~=!_^r&m) zRRV&sJoB-_V4$PuXTXQOr&PUl%kGEpRuKtlB_6JQf&kqsBP~FqoL+)zMjT)X{k2&w{myh6qhlG|lEOK!|{*-!w@we672VSmUNv z4_MtXxRI?8Z6C*SKLZ=;hB#ao9OU$5J`;FN1ckG6I|#n9d4EX)2;kNZ#F#PMmd{_y zZMTXCGT3Gb1&ryueKcLXqH@QUn51*L-L?<@3NOu`D0j(>j;0psL}ix$DBqJJ(?- zkF62r3GjuR#9e$n4Lo9DFus9+RM(5<>Cs~Mpc7ue&{)GKC-FRAP|}&v>j=lTY(Jnj zfiLg42A5yX4HtN{a}XdJp}{K^hxN&0d*7GkEo`AT>ZISA32YXy!sAJm1%<7_+(b|s z(aHYyTs-99YK=x^2Zxc;%&z+!7J^BtFr5~7Pao<+8G3y;phQRYzXc_rKTBS?Q?&|6 zU8d|NQbggjm!2KLC<5XHny=)wRKrF&Q3MKM8I(B?j$qSFdSDA~@BKXVNS&MXU@X5c zcpKUH?K7XT01#hdN%wyj#}E+}A6#bu z4n#EXBrnzh1I=%Z68F44r7xHOfyB@d$q@99z-)E8RVCuY3xMJZ$QK4100CQQ0K=hc zfMq5EEy_{qLWaV2Zd?Inc-@yT{`crxw*M1ra(o9e|7-pnb@Y`71iTr<&Y#QOe$YR>hEmkLw%{zen1`~&kR>W}U` zzC$uL<9bFp{IzlNi3rCLgh41%FYJS{LL1s>%vf7Dd=GDWXuX@rXy+^A1P{!n#&-M4 z$3g^Y;r6fbOF|Zy@NO&!<6@eu$%o~i5cujut)^i7M=Sq;000W(A?F+gxXwZF_e-cy zp8xCh3ThO|%ix`mcl}lKdB2O~{yBjUxu6>!Bdjqm`|v`_WKLW*d#K*CEpMIb8YdK_ zNlbGsKl(btpZq8yNKlmv6M{vaC+egv_)!NwNYt%TCxr>irNkS_bmwCD@Q*;c7X@4olF)cS)`JING)nXb{Uvj_X?GvOgX;+Dgn;A> z2Sb7B-C+q3u=)o;*t74iW+kr}ZWdB+bfzwv2QJsB@((bw3+Et!|52PC!TJKMbV(qX zg8DR&d)aI(<<)RC$>FAJF!f*kgt^)nf^3Id|0vX@ zreQ)48SEaxuxJ_w;GKwD2kae_i5H%X#6~&Jd*wj5v?>Bt17G{v`D^SU{?L(J5jWq&k5+%?fdHx+0L5lR-T?|$ zYCj2omgEv9@^th`9R}QM%uxz8=fTpcWN85H-$k|)d>;A-<+t%#3Y>6|iFbYxss|9S zA4wd=OE6o-b|Hx%Nx*$qKfYVdGz5bZIN?FfvxVL5U(*kI!mtpXG601TA}S4od2qQv z=wUGtric$o0|k80T%RU^uxuWtzm~k<`UNHugTa_?S|i^dSRY>aKlVIUDDV~lFa^*) zf)1zAs{-s?uPzbJL8}Kj1XweYEeBhRl!kfRbz(5@G!E3TBkUu)z;zl-Y4(BG zk*KQ*X_=8~0A9ciU@M`aHHI&<0zF3KhN;Z|A1NZg&Cbg1pNcBj;z(R<^r<)5Aye=E zRQ}B3OUJXhduKAUluTo?yE#mGI|e&n*aaeFGeP+O9#aG@SSsx>E*XdE+E1fgd`)wiT6TbPf0Fb=MNZ_D3FMaI&%^E z|A`VP;#LCCG-0)P9;HIN1}YA4K=w%%iEvOrWss%TD&`|zRqyLoC&FlPxk+$kv=ggG z*dl_^dK4GxBpj~AY#2L`N6R=4gJ8`<;L+WRrY9Y27tUXw$%jww^Ii6kT=Ljm#Bx(D!LlQZIp z@VH|HOsS;6ek)H~c$&qgXSnm?UPoD$`uV*aw|kwwL@%&o&66-%3>~#A6&7vSY~>}5 zeq2I6qP($LxQsr4vS>o=s!>)wjXS60%ossF7`CIoa=-wtW0})C77u@vylk%k>qSjC 
z6^(;Qe6+#x7t!Y+{t?*hXCS>NVEJ^hpa_1=#E=!p%o!g`BiEdqp*t1{@1mrWxucJxP_J6okhjAK!vZNbY45^JiQqvD*#8A{pr6P zrOw8%k)8DSUx+SJS_WJn*3SYgOBFy+T;abQGQ=VM1E6IqFm4Mn)E(rtFPvhgNb#Ej z@-v*Lv4dYgyA=V_4N1qmz9L%DGcS2pEA=g#AB6tNuyn-seFH%6O;PWH%fEP%!?|EF zKz+j0*TPFxn*5P&&p^sWo5(q4G`Gv(1(*WBT%hIPsQuk((cbz<#BFh}K*8#TtFpY!Si0DPIiS!Kv>@JR5vn6xNEU}#f zK>UG$d^1d9K2ue>ZC7wg2+fg1q=7UV_d<8ikEDV-R2md@wf1GLG7$g1Jb*L)CGQ?Tc;@k z9D&h;Lg)^=r|Ga*?!UxLbi?Ajsx*a)&_2&AO_>PTu72SuVSTF|&-)a&c|jr$Xr&8^ z8w;*ymv`3`<%YO7e86{vxKu!wxr4PWV@cTu|HXrbDkNNPz^&>csj9Ys=9;jS27#9_ zb|vHU5)?s>4I(>{F(R&AKAJxghnU2%1M&_+B4*e-^GAx&n3O)jOdK>1(71LGc{3P` z4}t$z<@v}Gbs*dM&J$yP9o!rVzVi4&a+x+LoT_LkXOHxR2%LiZW%Y36Mv#A!$#UCchk!i6z#iZV%MfS;)E4y&J#W_i#%d3;9?a;yd|2!)DA zpGETAb1W5I?)%>P-t(vBvb#(!ED2$7Q*3aPi3Omp*n5yePnJ}u%UGD|DUmY2%ov8Y z*EL)%;Y2C%!Q18e3McGX9CBZhqZpX%qwHJpIBAOhoWc~r;W?=^AP%FTbrnAo#V>)x z&mRMdd4rRuS#JP&K_DvwKtw5+aBwIj2o`5tsF}-kS-WFS5FP{?*5Enx2LsBBL3h9i zEmHP54nUw4W03s@HMG>XVB~PI$pR>W>JeIa`5j!ZJ^n$gZdZON$bOYP77X%fsYqVI z^g-HvAwo$lgMd1;MCtif&LxS74TeXFFo1^$OkWe?CPYM4_7_>r`7|6>O_X&q&j=O? z#DHU=7k_Tvxt7L7$*1N`*PWbCJ}M%Ciw4ZZMT03S4ukO)Y}hsoq>9=P zS~o5iF*4oNTMcX@vGZgd$Rc>dH+kEayeF`e8pYriY?%A6c4`oaKC1mbLVX?J1?~WV39;)!`A#RN+uXRndTDtXy6>P?46`1cE$PlK zD*czl-HYJXB8?tmOdl)QbTf4!LM13GAQ@xqLeKxdSom2%y+T&6?VNCiSN7qUM3H#E zY)_8doHQik#FcCF)@@U=>l=gJ~$p_bM<^ zjLgE0(q$fYPx*_?-cQZHFaI`Qz!6Eo5TIBzXJ^fbNFTR;TE7t}%L}|@#h`vKmbpTV zpA=DPC!_M(f&zo_@uB!>V3fO3#g=hR_3o;xr_V2ci@lpve?sWQ&FKCq3!_J)N6_L5 zkceM)y)j8w7Z?2m6+_WMUXGV9be#osZG+`qQh*|yD+oKW22}c}EzdM920qkuR3-rS z%vI@LZGr?yqXWOe$MA7~>LgD0-Ak+S?S-5D@4DBB;U-Y72K1YuK0&tcbosAGH)ZL;GN96om&2L2~vG_j&+v;8E#UM>KztD%nLM#7y)$FNNL`Lt+xTzrPd`E7?k2NFIY$5f%)>;6WW6LVGZ1%4cIZ z>hN_asGwLE5^RX%chjr-RF?>N>5JBgP>AOG8&i_$*cMy-2tgx2h!zwUnt?@B<HBP;gtekzMm#xa3ur<$DMq z=U;CCGCXWd7X!8G{va8n?7wjKW@(A8B_EYFO5}o#Lhv@2kgqGC>I!H&dP_@Mo zd;daImnWh>Ghn%1v0v(<-Q);pY1e-sMW0FX6Of5Hi_sFLTi^O>FGdY?V)+AZ`9IY| zU+Bs&v-cCrUT=T^01kK|>L>*|zhi{TK6u!u{Gio+fsZ2rAyzG1l#-d(60 z;BOEFz<30#5r`MSBG&Z(e`pMb#UvbpVNvHCOTG^S;5$bHfcZrW=M}iJ)F*K{fw}2u zDJrx+6A1xC6gFXqO$7xTAZ0Xi+$8=+gt)0Z)3d3~2PdDsjr51B18rZ08748NK$s>4 z0_g5WT5ZwalSRvIhyFsE6N!oJKDm0s1W?c5>Rohyo=t_&PKa*}&$fcm5q~B2P15=;e=F;WY%EKi{cPA=xY!S05Gv_0}Q_9J)fsVF;sz zA_x&NaWVdP|a&~;Zg-+1XDV2|NA+nA% zeae!6#TXV4`3RtKrbQ!8UCuxhQy0WtgsbMW<1Jf9(6@ksi0AR=#iVJ|uu}-h58@F# z@=qms%AuSN{76t7kx zVrHg*)d1*FY4IhGgXq4Mv=70wU7a3*_TZu~yxAHoe+cyK7tkL3A>XA+ng?MzIZ<=$ zg^L(KGcNy5J=cD0Tgr@bn)3BHfg%PmVVK2VNt_#`>t+XiMSaaOIHsX*?iV2LExw}i zV9^W)MihS;OZDasRpsEpNy+=a>TJT%yc*3i((J_k3o)_N@!o*%NN$LEuTQe1qDpyr z5f(UN!pGvi||F6oxq0iH)v&^tr#(;z}V<+6>l_|rv(Ao}0N zGh*W4c9kpf!O5*>%j4q-c{JaRg!+AhVCMhF?2#EsP5+PKUm0b@VluAbP05#!ve9=m z0GFUK&Yd8oKHD5zU16j%ilEpr;+5|fu!|>y`m25_h1je|UhtKBxGmGu{pASc{XcxK z+dLxCVhsp&r|;~8v%QD59dvuQY{8Ks%0Wyv6()f%>s$~PnHz{Oa8o_K ze~vy6M6t;d4^v;Km}7kZ(0&5KB^uxnR(qezxO;}-(V}BHWr+_Ssk4q5z-842X~(7?riOseG2#w}t>zQ=TupD5Jx~wUf0lcAG8%2=u}(3*P$me3mt~vE50OJ4Y<*w0%1;K zqER9B_-kH_#WGE|9owvS`ZH~|*z(}IAoh6({3RwCWwBT!f@Vn5Lv>#*REme52 z1~}xQxmi~(y}Qcah=i+Qcq!)*z?H4X6X%^rz9xfR((<%*eV@oL-+Z-cL`DleARj@q9hcRsB}lNcod%efyJu;)|37X>L}FXV zCw|tQ0um6L2e9)p2o}C}>&m?NT%IsYH{|lwRiY9hCJuu}IMJsq;rO^(42TPeCJTvo zH1p-qcukZ7k7{TCPfuUOp?PGy^bDAgD7dat_jYAiyPTKUGm=NmkB7rSS%{Aoh~e*g zl;juDqam)r_orOJ{8(Ze7Bi<8Hac7IVM3P^;MI!@h>{1?(SvEC@>1^})Nq919S%+% zNu!|XI$XQsmNkxoy%viB+U5-0R--q?SA0sr#9SPUv}2B+;y)vo`2`k9`CDy45*k8A zLI_DlMjGWX@`I~i3(8T14JsBie_vLGoM|!qq(G*1+^IA~31)-yjDtiZ;~^Eqa(>(2 z5g@JU1R1!Gels=hj_f@&tr-H}sNj%Ey&mQJ4>gQDCM6h<2SZItkOpv0W<#ELfda2lNN@ zmv6%HToSW*9k#oR%t#|{jrE>lMF8KEH@=SI{wrWra2E1Cp?7~0KG~rFQhTay;545x3g|gU@BDCwX0e;N4cYoj)avRKwq|CdBi6Ril(7|7THoGjZyIKp6Vwva5 
zrg9$LZc%6l4JFo@C)RQVL4Zn^qR^#$D-?|lQV<{krm8u+&6C-)&l_mNsS3mh2*>bw@h;#w~si9+!ODF!LU_ZhBN$Ja^UMe+S-ripPJ-Kr4nFgXN zG4GG)2fgid*-d^pOMsU&#rUwPMBmPn64S%wt zM8~kmreGWGb9MRP>K_pw$(~nGnGG2XFeF4o(Nlf^LN1);@9=VDd|Zw}?hL{17LGVbKNz6>cpvC`^Jfa_tPOPPyYn}M{TWGpYJ9hD?Kx3mowx~FwY~wf zW7|O-cbOpJ(m&gQ__7**V8^#-_SM$0Kcx6g@2)qz|jz1 z0(UhkL!&Yu{WzOFWjz%m6X5qTb9rN(i|iW*wU>9sJ|`0Xa7_cYnbxLo?WS<;rg!8f zRnSmKWC5vwOo%K%KMF&wo1u*$eq2GOs9?nbbq^rCf=Nu4z*q!6szUcc)H)nvA<95P z8JZ*qX`tZz<`??Pn#T=GGaMNfiXXK|Ow5~&Ql$;TvccBG%WTUO$i07et<}B=XY#Xr zx9#!cZLp&S*@Q!Sd=bSfOShVsMbb{evo&qHT;5)DU@#rko0}PzrnO zum0fr{#(dy)Uia+F8sacwz32Z+50!MWhzS{Lq;HAp@>k)j3uJ7UI!LR2DHYh{J|M3 zT6<>Y{aObRt^>o!K*{g+|K`mPLDn$sdtO`bpqT{6HVuPY24X&e{GxP!73Lr8dSfwR zC4&&q%5vOs`BSi>E7h{)S%0d&O&5h2i2QNUp0%jkdsFuQRoewLQi~Kop$K;o*T)9D zcI5pr6&zQ}^*xRtmJkpHf&&KJH|p!bQ9H9x^f{9M9hYWfC4xtao)6;O0A z!({Xvs+cH429cQvj$~m7hFE0}e={flqJ&x>iVOLhS+^_r=+X=c4`B>$^sJCh>wW;{ z`bR(F6jyP9x5va-8(Ri{=@Z_oGK%?$(a1^^hai|3AXPCaD@3C>z4*6PUhH=syZUR6 zn>Khzw!^a+)BEToK_@q%%lSB8pj8zrPiRsEFm=!=f?0)6u)dP`OlYDIQ!r3&hx~MC z@ALOF#4XD=r!UH2FG?^712`xtF|v(9@qU`BF~P{VxTna4E@GIq{Dtp@3TJ-Q78F;V zoOwlZB%9xdY}H7S_KS@A0J6>wz*%-1FG zGX0+_@Zmz62plRCh*V#J3tCnVXoC{)d18IG_Me+m37i01h@G>nsGgm{LLge%M-gOJMdt{>?Z* zAMdm7Sn0duj`K@~`d#yQ2|vcv(LesG-W2!`(3CyW{|)qnNiU@B8XouMwU47T*dIaE zx?T{(|EiJ(No4)Pq}~-q?msGQev|tTKznOT_#k|Dhc^HtSwo(y{pIkf12dzdh)##I z#MCy^T;vm*+QL(3LC@$YVxE{%FrUxK2}EdQcgK?nK>8JvfOW}vxqkGVO2OHVh4VT- zS6;`E#X%E?OPDY#eqNWzNOaUgIzO#==n4%(#{qE-AS14&9b|g>C{tn zBx?>CYt9M|KlDagKs}m12dA96Knf%SZ3J*iATDM#34k|}r&Zp<{@Z4Kudfed3VZ&C zXL5m2K%fYMdhkLHnODK~(G)N9dRvR8+eVYcoP2)Zz zJ~igw#ln5>=p_yzWJZr7`xT1qXdNO?Fs`qmFmV%vz*G-rXHAOD1k;MP*4*f|We9gYl2QmP>KknD5Nhit%WvgKglaR>E{(VIG&@pgWf zT74L@ubXh${C>B?WzcYymJltXuYMwOa z3`gQY=Wh+7^yZkOX9%Imej;blXU%fgAWS;SxJKtc#J@*=wB=XKJCGg-!Vj?ErqPK- zEAr~gcg6IX^O4o*&~&$#YprdJ3Q+$Qispk4pC9V3qNHH-G{r^^>(PUZv1srd>o1iu z*?f`r?v>d7LbFgbJgesuE~A?Z7!Vy`Kt0}tKk_pArAGLFJ(A3c2#6y(eSyvLd>u$8 zdGi<>Jhj#)s2mb-ay8nCOeX_^<-^4#Z~4K&C-{ipzc8_grYeQ{HV+Xc`f50Gb1TI2 z7hf13x8{<+@wrlQ0}ppzqB#&fAEBZ{0kM#8GRgjp{BIdx(r#;IhJhiT{I6pL2#ESW z9%%j|+9T2R#7s&bVE-B`LL&ATvHMHqg%c7hyD85AM@}ps19Zdv~1a)hN#txg4#WN>@`~?5kzS)Xv{$hA_Y)4eKM8R zv`~`}QLLmBVoBUO(=UTmaVRH=YgFN;5+MX{fN_dH4N3AxbN&7HRxt#8tLd;&kvU{$ zkqIz@!Ud6WO!%UgrWs77xU^9aH~R2f-<8~7S6dh}plxStre9u$%h!ha zS75iAfrLRos>R|wAE-A>2alJ90b_N2W>$6*fUkWsvo*NQ(>`9$6#5AR!$NRST4(NZ zSC0&p>zt9KntUVLxx;_6$v1zChqBvfS^+<&=Wrgh@Qk#Q{*A&gU=j4kYX_{>ZnVzI z|3VcYluTG$lnX6)CnG*f?vkiR=3IOc8!wTA_~1i}@Ild3POA^9wI_sog4-T zW)UxZ*kTQ`;YP)HLt?k%sXMZg5N=z-2j?RVvGw=qb{Z&DGCsclS#F7)%tnOA;vYdg z6Ao$EuM3AElQguQt+?o${_2f1(1rN12lM}L$X7u7WkE5N;O-b=;`qos4)z0`EfR(((7n~LXT?I`Y18BP`1?OV-+hmk{(?Ul!1dM-!_dlR*z7(L zh+yTG(PS}R1U-f`xrfKi- zZq?yq@2{ZtgF%uB7isf_oM{gGmYWQVEFdv);>aW;5_m(@1hM$&m6<^k(72v2$B65U z13F{}pkyU~6%e#&^nVDh!Jud!f&VMmzGK#a?|}XabP5LjHjN(^BS1uPm^xgTqMiz6 z?7lsNEr$@B?r5Z{Ucn7TYyZ9Yr+03@gffs$8eeAf+3Afw;6Dm%w18smXDF>v(aqm0 z1FVAe1ui+^wr-$?xOn%gMdySQ&0QN8)I=YyY)dUB?Xp_ zAGbn5wydl3x1<*rJm49(x-0)$%urs+r}ek?T&4su%fEAWiZ9FF-UkP!a;2PE*btCd z48Xh`2LZs?Nydz8P~KCHGoTWwg>GODt*n)|Tl6s9ALH+Q91H!rf1-?ye7a?bMPRrL zCk+W<91ym_z*^8}jPbjGG*pPw{eX@}!{|@FXabxow=1>))IoRyGzEZxB=FuQKtoG2 z9#f~|OMboe>WX08zkUgZ4#>Rfm4Ty0whjkvGyIglj-Y;zwGAhmsDe?^a*>y#Roo

}mOLTb*$P)5g;`2n~o7EWrZ7=w2FT&BjYfYYB`2P6WXc zu@Q#Y#cvIYd(+2uXLlJ8GJE~M(Y41)T(_{SDHtRj$sCqz9rS)jXUm!qHJx~u{l#@4 zfSE-dj`+ZZty_BsY?d^5dj>2e5rPo0j&OJY^VM9-;w&2n$PPdco>uHeVhBTl&OBoe za6Db8UKlRisr(c%1hWYWLlr=F^!HATe@A(4#E|9*i7CVi9wculM_@b%9ZUQ^3wzdU zzETblHxQk}VH~UcCI6)l8sgPKg<}Sx^4Q<0uvHIrN9Da8{y1q;d--&LR)8ZC0)TeJ zz(O!8M$gN&%2|8^RW+)V@CNwROQ|5|AyZDrsS_UCJ-o!ti2%O%F(QIK?!ordw^((h z7irfgXFpfVwpjwBc|w6uuqzu5r$4wy&tML_+Z;Uc zID=si2Rn~|`VCtM!6NB(mb^5DI!aN z8LL=qyPT$!yHM6Kk=$+Z^vdZ3!b6(4Bat{l2p#V^Ibwmoe_~96oH@L~5vb;1$79f0rfq}h5nQ7PN{68^jH1Ar(^r4Qm!KfQZvs>Zgfszs9JzM?1`D3( zOu62-@Cr)7*~Xxl=KY2uNBJqqmxQjCm0(Zor+t)nht@WP#;8vLqbOS zgooR5$S_kTS3tv(n)UT2^AEs$pSZk~^S<9pX?!JiXI(DTB_OUCTo^prd%%g%SOXju z6f6yyk?9$>pP7R$Rg)NcKNa5?oQupKp<(SIVR12jJ57<($Xs~`C8BT3rE@d0MWLZc zz(L+S_7XD?^LAwnc$%8QLw zyFT!+kb}u4dMa|#+JZ3Ap8y@eP_r%KAKfmW;NlEH04WRvLQu!P8hox5Pksx?ehtmR z`5AmEsy8v;YqArJSc@ib|imE)D55gRbz9) zUBm?5IhT(|=?*}ru-PRrQ=0iUc$8Su zYq@B>g~QTff*7E%9zjYLw8F$3TJidX$$B$*a`Yr%TTupMEj*YAf4mg&8G59Gdz*XK zXFd15*YX>N`IdqGJ&!5rr(gv#k)qOb)?pe8^2a6QvtQu|OeRx(40s&65!1p_U--XZ zv}t&}wqi?5hMNHZUQ%7JUr1aV+O~q;EQC?95s4!#j|U^l9R<)!v{Eu+M$9svesi0| zc+UyxZW9GvgSY0J6V_g0kC60n&pp1U>bl7bHIK$0r6~OeX{7_X441%t(`D#siqQl% zLlZfyz4RoZF<~KN`Pu6)8lnRP(mmo`!ECGp3-3t^9mb7LS#pa?>t8Q+n9jmY7C>{1;Ceh;Y?x)vmS65 zA@15~^gd4~-tpW`_`^z0oJ9d5j7r6O zYqist!Sn!vATF?r5Wpj9an<^5iwHOg?C%f6rwk8rXTp`$2il}fOBdL@^JneoK3d)k zBK%N$d>(5;-tW7*S$%m33cjlEQ;g&`nXw9k_34^!3 zaQEY8*XRYT$W@4$sC5DEVJ;6@tK6>9ViHY}?S%*u?ydg+P{4pS|0z&2G9WEyckabY zAGV2y?|#^Z2KGV6!{=ZHO$>#=TZ*7`8W8Q;b7Qho0ci}o#*i_SG-DN~qho$pSDr+T zE6zu1*ZGj7yXM`;gnoj}L8))S2ZEI7djB+1TzPz8Y_ z_n(1?7dre8nJO?^GQ%D%EkNXAA~Kjsw+&N5(XNB}Ms4NXr!Y{$1&tsAa0*&kCaNvf z&e>R)oHa%)`(cQvfT$qnX)tSxEhNo?2qcKGVb|(;^!+Oc!8hJp!3M{=4G>UEtiJN? z5PCpI0ce4hnF6qw-e^)SjFn?kAV3zlJ7*4%8!R#r;FLe+ud3licWRWmX{qMIQcl5x z77|_-a5&#?g$MTWKqlB18AazFhMXxF+Ozb-e=GwCt)%GA|4N19eEtR1}On*KgD(>$brRk zzPGS49${b@1%^J=*>fOl&RApLml{JQfDD!|&Prwo2I1fVA-o;4S_Uh3Y|v;n@r0eB zJVTdwulJ+5oHGQFggZeoMty1CokiyL73x>Lb{4-c+!63948lPmxJajmyeOyhFh3b6 z#x$InZ~Y2Y3w`kCU~2(xBrY&}$2k{TE#oZ(!Gq_^2lv5stkap;J)GQA>?S-QUXCVw z{+Emi-QMo+?|D~^5N~jYyZDtwW!DiqPzT5Wm_nQPD&gT-fk%Xb6Kv@v@E*3*^+-OY z7G1`P08^j|#1@z+V3CIfHsnj1Us}spHGmN)6aXur1m)phN)5tr9F}@@Pq)wkN}AW{)0Amy(-%!n}~hNIm-IN17;IL3`0Ffhe< zhgg1_zNM*iR?t5n;$lKvusHB88 zyaLHao043?LI4XsU(8l|rQyAlwIbUNqROaFET7{dwYS{iCZn zckvBWNi$@t+YR6{n;2BaE=Qq+gQzD2ez&tE6#zXJe7 zJQE-g0SIR+7HAP(2kYOK)O18!EFu~b92O%5;Y5m(XT6?Skva3p=<4p|Ct6cP5`ldk z1p;6N3jo1_u^{BI{I?PH^)hoUki(I^Wqk}1z!mTg@C`TxXcvwMp;#=z>Q}}i@s2?OE(aD|y31QL zBP(ED34#Tm)zw8kbiG4ef0->DU?HBz+xlnn`$G^v76Ld}#WqFYbkfH{lxcq%Xj0KI zUBpW9{822kQU2ygv&8u?H{JMle{#HyV>9DG#OdkRsj3`yYE6 zjqpVnCJNfrGi=~}U})cU$TrYiIsmhQ;U?SU{LwwjjJg$CzZY8W^gv`54d!*^gN+A; z13>9yiua1;tgx&rATbM%2#Jj*+NT50#}KKWso_=$!e__tzw2KJ1*5~0HYt<}#diG0 z3-aaTiZ9$4-suD{ovhOO4&IN(3t=Vn4Cm+>`R-7kHhH?`>(tP379xiLY6>E|M}P{s zJbT{n9*}<*L7JIQfzlfEUk?rsg8*7#Ku8=azdk;DK63aG!nkDyLgMT4q^QOSLTmf$ z)ZA=PN!(BvfN(Afo(?~H$i@voZRkI^BX!m+QRp9zTU7r=mF(ne5OP0+nS|cf5@Yz! 
zcOqM6aDY%V^p=-AYyh%=5|OM}ppZ$A^6a%!mfTE{f`WUu2%R{x^}>?yFaQe&EfpoF z4gQYDz1e$;1Q!HtkST%RU13yeW9(c3$`wNQai2xQQj_~FVC#va5MWj=Ba2TJ19KtL zB0FR5H&65sg1E>raTfc}fz`q-9FEtE-hvp>!WzR4QHOC?(h2>hCMxF=t$#=zFntR^ ziRk~@{@{QrH@Yh{C;Iyl15*UhYx_XXMXlJ~jPFP$aC(Jl!)w{x7&5OwjCgbzg*Ciw zs}}}%j#U?Cwt+Vcy@C){bIb?fZ~8X9itFDMb_R!&4b;^ur$C$rSnv?^$P2+3H3lI` zumP|z&9Tda$AEOIAwzs!uYMO@9MIem!^40TO=mMvG&iOAp5+n5it*eMJ_wn8*pEP#0ZrxX0jR;M0Pey;bA|(v=#f9a zt^lH(V>O?dUKiZ&DsGugATKcx*1rYT4``oMm1COdzuOW(a)huhq1i^y zx(lDIhEpd_^VZKTLQ?s=|5b35o?nF(FtiJzdk|&cyuBRf%5w@7XMm1VZ{JNipRZb2 zC<}`7c&dg&Qk#yqkH~@=f$UPCV%J4kzZ6?*N9J5TA@(dHG1ON)013j8TF+EN+FzT| ze?%G%apXVx8ir)iu0(Eo-eI-eEPiqp{rBAdZbW57Kr;X|0ZP@(qUmCeD9dnn|L~Sb zQ^MXBo5uyiH~6kS1!l3!F2CL# z3w%J0EVD*hO^yT?Kml1NS!If;d=>%R3_ecGNz1!el0UCtPJ);xHo=^gC#6!`fgN&! zhA!Zow}nAUg8F+Ii2j@z^Dvf_0R`14TYs$yrpU`$?y=bX*uh!CAjrUi5O%;0#2(qi z*jBH&J76#!hvrJDP7^;rURrUyR4~;?d;h918;A^90YwN_ef&Le<1Mng!Vwe>`0Fo) zqBI(494N9mAoa^a^51ErR6#b3^C}vE1!(zMu09J++4pevT1yG7I;n{{AZp~vzZooL zqw?*#;mD7$M+?RH;c(bI)WV*AUC#v}!a*w$2lxYg(3|r_=k}b#mW1|#0XA++>Ls;L3#>;q4%OF#;Z@gm|2InPXY%H6Wh*`&UGm-A%A%AEAwu4mI&tqUY=sg%aDUXDK=y2U7FwmdX@?`6G7>-0emHFJcMNL(E#Ww(#)zg0tJYVE zg;qq0(BQ%6$%(BpO_kgx7iS0+l@tQJ0YVB-a-t7TnCwRwP2BRM9^)N+nU#gW9Ix~t zwOf&N5Adi~I0HX#T9_m9j&TT}MsAEH-fE2>OX{xUtL9o0O(biYT z)yI8=;ok}^HX2}{L}=r8H4jf_S8m_i1KC)RfSob*Wj_-BB)uYEd3NQL(14I>2ExWI z!Jv{=^4mdBnAbNQqZlyiUo+hbHa05+gI)uLT{r}edHNm{vNf2OlLM%TawQ|88fFiW~3IRvOD*!H=^aW3n;q zdL;qMS22J8VV1Y`Y}^BGyHL!&DD!3UU5CNo7E!d2FhplXf_M^;xyc-p8KAf`k_U27 z96J*N6D$ttTf9l z_53u%hZ{VBuCxF~FlsT32+D>-o{NOzX>u=%`G7iQ5Qr;__jRee*`A4T2N#SRwEMr_ z#29ITH_{x&UiajAdcc4IWYLsqBiDYcCNorhB((SjaoF(H-%5|r_%GrWqx*K|GB)>^ zdkL0#5id7|cD1QF4qZGGaAd`>nL<5$6=GRe7)6@3lRWx9md;VHCz`Z2a`XD0HO~l437+sVV)j@8<$UsF z3k(510r(he5JD!Z$jgOmVPI@=HB&p^!#7)fU7e&j{_3=viHLH$j9y}$m5C+5k+{>_ z=8LP{+_3_So8Dki7if_*q!fVqrp^P2^tc>pV@qG;+M zn*f@iIdYa6ZYVbuf#$qhX~D&LW+(>~eSe#%3@{@SM2W>lmI3Luk_aC#Ja+w(*ZM#k zn-Czjx(DhDtplm1Qghi-%oGqP8G}JcXgp|QF~*~84mEJLVfr~qr-M~f@i^3i&lit9 z*b;$2z$k==fL87udu-KyFG1+QSTD$)3B7ZH+oh~CG^E^1o%nFxAlcQg+c`21{oNF6eQ^SN6Us!1T`? 
zsVISIh+yol3Caj#A_eS!H{lR0I673cxQ*pa&2P?xwIs)UBUg@!r5yy_Icz$HTsfm) z(4$BxGb0J0l6sF8g#a02y{rLk@u{0r^g3AaLVy7|V8t6;8#z+I98prX;oxRGa5EGh z24*lW85%H11+KFXRRDLO!!Hh_T+E-sLjF+=$Iz*JPkq7`U6!JDuE&)tHX?GCDYBG# zV;ZYAflvse{fe23jvq`MJJj2K&q8Q;=tP!m+_Y)=D zLcU*}4>zTv-^_*!qtf|kT5K6{a-Tr@CS(l+wo^#cBNZC9ofSx+4f!~j1Tw8r=jf{5 zZ&4NuR32#tAHAPr_JcYTs#ni+0S@U>gN9Z*;cvZvm69<@$BpT%bjk|B6VIV>JTH}y zor%S>_PR+R?^-Dcty-7xy#w)pug}hvPwHY%UvK5T#(>fg3{lE(p)K1@Exxq!uU{ow zFg)+U`x;0e#3F=_h0sIMKGO-HQeUB9#;3qTTW^^CGL`;;_X*h&J3IqNsTgg1y0DI~ zpomZ*2^Fz_486_&kI;R|`FwI5Boc>#NO(Zb-ll?#CSrEXB1cD`vmx!FWm`DyrEA_Aie#fHaql(~-9 zw)li4aHAK}=w>B5*?Qj-D3M zIbwcRN0TC~nO%#>K*S=;((5BGj?f-HUjwj&3R zGxELw@B(0j8bPrELk6Rz6iR77+})Y4SUN!2V4yw?iYh#CEwzRvlrsZMvgjT#z(^F~_acBqAl-VAHHD^cddw~( z;r^;u+vrEBVBu6-nq5@-^f=7~G2@AnN zpe*3NB)r~C)Fr?5R|!eA;VmGNHmPN{q6m{# zG!_dWA{4kic)SrN@f^B;iv+_#CNBq`9LxqF7+ND@lfm<-JYyDn0i{#~GSc2Q7vnnxH~Nc5487y3RgBNE=jbv8F0PJ~9GS-&PJ zW5VPTW$i^hfR-tULy(dO;C_gpb`ES`1BWlj-R=ilXdNbHR%1}GaA8?Wgb}b7BVnFOjq?$@^|MnH!SUA&Op|59)%Y&GZ<|KGj zG}*s?QonCAEp+FKG4Q6L-xlUW{ffOZ$V zrvsEVW|l1*%oYZuUnLHOP&k&)hF-mV4kiLukz0HG-8{%eppWhjmky58$0hLKQEmqj z)}ZJV0qJ@_F2BGe^!qJ57=RUwz`$uy5H|(`H+%*zTUkR33K?L;i9(%5pqplIk%ck5GNplj)GdciWz?|he0^;pg}{9JvJqvs>8w7Wt^F}(6Zv7 z#X;hr*g3!P{1Ehk_ClZk`cmts%#pM-v}%{%ywKo08U?aI>S>oA*SzD`8!E}9ivI`H zRdAF|h**c|jD0g7u7;lGxAINqUW+>tyXahm9M_}!S$`z_A*c7gtA|gP_L^x4GZ<@^ z_bWTo?(;`>5W{{JIY=VOp(RZ(XRGfKlNqvrV~5L%#(l-YrAs|_Gdhh{GpJ^fj&$}H z>1L3|Ux2IY;rtpXY0ap}!>9t#K)bwF2NHgl{D1WL1i%77GUUy2VyKy|XVL|2yZmuK z0ma4Ity@8O7{M~Jz-NK@XS`Q?-P{jN^bc=7v;v&~t^@e&bKPf|9-Qz1A)FO`{C&@! z4(q`0CLygvvNK7W_1^*TBvv@e4@c-);ePOMCc)S|B@!YFp?Q2{7a-9^m|;P^tTn!F zWx^0(P%3w}Pb=dPRmY1DNIa*dg9IJ-7$E!k55%Dmdj0)}V6ZYVJ;93O02m$Q6YIh6C}<1GHXp3XgWN@z*JuKPz0q5+#dm3=o`Rb9*J(Y=Iwpm9oV$ zh&`1D8IZxd*b!h>iYxpdzr-%EVP5?JN5CWJVG`|1K`95mj8PFDV@w#KVE2#PefU=h zhaQKHzx)1Ia52G82eP1Sfkp<=j8hKaFM1DJlq(nI$mU60-{buSaN0)(J1c_T zIgLLV^wVL$76S+RM)Qk}v>cKcdRQyUoDazd`xD^cIev4LBP?B{v>x^kCl$SdSp#66 zEf3!Hv#$)|UG0wzv3^X>hz)64hKopH8X#a;#+w`TdLIBQm<}+3a%4g9Gbtj{2HPwO zNF!sReS<>!f{r!sXY3}W|Np|~V%xKn@(a4L1lXqM7veyI&xsUiU)R3QgFkw|7CIz8Qic+z-il zW>*&$kf_U`)3rNi&u{7F(hp0*2~sfC%L))Vm_)eZt6yS{m-h1Z!~q-vonW8>js?IA z$a;%`+Ciyb0M}5PVx6#c#vuV0Kj7`*l~xEXc(?ClWs9 zzXZc^F#*Oi5?ucPN}6M!NOo)D{)^}?S6}AYxDZ0ycTr9a6Q_*0xHL(4f%iq<-(Uz_ z0~XNgZx{}vLv8_fvd_+^W^wz`G*k~F9iOA#kfI^_8ht&}d33M>(12=D2>>pL2S$U| z*sMOWcNLAmoBrRb;cxvF=pRvp=oyoK!PqtpgYvzWK5@Y|kxnrXkixInx47xd0#I)F zk1!)UUCh3&$&sX)Q_EKcfp!)I;q$QsLvvpO-xbdHf;-~SB3r{}-S3;&1S$EA{~&4GjGh3SQp=g);%T=VIu-lx6<&Ej$|@7OQr;%s6r@6K*VAS zIMkBo>!9tD7*$KlUa-e5ql3Vlc<@?S@D%|-sNqiFWv&uU*F%+&?}eb3bO&un9JQ8u zQb7j_fOcNUfD&&pnXqOo#9}$kT>J&DsQ!qBaRVSYwct3&JoXHXJ`g!Qd#D=^-7R28 zDGe{|4+$}6+U;iW$NhyQU+?OjutG3F6!(6IBJTgtm2o`*h9WPRkf4J{2DuWPw)d!N z5>Y^dMNbKp{~3Ynd%I{a4UabD2Yyddzp}EX*}7-1l8vX+%YI|-0dWA*2bI0#s4yDM zoZYZ6!KWHW;<(}`D^wq0wiJ~GHdy8b@!Ve>WUNHwsut)SyOfze%z(UM#X=f_(04UH z(O95vz~*rfb|f?84F*{QERR?1B`x)W*nQ;jwuxWD>{167=?PF<1frmik2J~*9YX$}+LgT|Ia)gq zAbH1+D+1%%jKBu$WeGnDb#UX^Z`qY~=6n*X?J^URh*UrbG{ga6lgUBN98~Z!)tO;J z(KzaMpB+ZG*a1MnU^pPs#?&A4g@JTzt*ny}dxEg+Yi||n6*UPNHJ`c+NT65={3??7 z{T3Z#jM5B9nhPA*{)BguIV=&rXhooC{$g3j+3FA~G=4kh({vA`7qpBBC_RCYV@%K= z%_Nx%>G0gRTG-+avNT;dJ@9$c=<4Qw5iu{oK56WF4vr3tAJY%om?7z(FR^uw8aMBT zlwpc;)#xqJukph=DF9qXtVO&1PH?}-^4^Ua1_(_xk0AbI)OP$=7|PZH?ijllrLpqw z%UyeFN6T?~ ztiA&FSPWn=3mUqYirjf+ z^9761DeNxDZ2;nHh%ujSdyprI!cJLKn1!tMLh~R7A1^{o3ff0w>6ZQDVq2KB*4APd zz7l+QgqgReY&sVd67(m>M-KT@{4P-1)I_fM6Asw9e+b-x30YtAPmOaA_x^%g+7*{N z9G8R@pmha6lqE=G-bB3vO9TV#z~uRSGSt2tduSMe1Ps9to*M;c8O2`B0vH`aCeQc| zvR$@zwr4DK?CMDfDRd~nvd1g8kDrm@h|C{J3wL+cuI}&v00Acq)Sn106%f2xEQ 
zY*+fIOR_MykMvatD=YbZq_>*?`l{)2vA*=do5M7wLQc22fslEUJ3}7MfKn1@Br60}fgF^7%OBV`%P@z1P3{BRs zUSA$e5)>tW9ID}tCyVs~!NhZk|Mgc2aXrNNp4tUR;Gj+=t>I?l_0u3OC?3F21Xe2p z)2p~0R**1jTQ_ybSkSPt1BCPy;`lnKlBH<%cd+E;=!mJfFWL5t8zmG;i4C=vaLqgzkF6J!JYBeRPdZdAJ{$? zPrmU49VsG`!Hm_Ui~IOha6a$1%(WQ2_Z!D{EwbT*T7_ zg7|nn2XE%*`9YvT7?b@BUxD10veq9k6Omwqa4A6nM8Wn`)OH7QJWGtvw_O0DgaUyf zY{Vq7$KG1IFo-d21&!pWGg$La;QWFoG#zEfVjy?g51obOM3T@_u&bBS3!cLv5r*LE z?*E~R#eNc_s$j2#eYMVTv}u(KJA^EL5j);=!P}*WVo2F4Q0U;uh~ui{=p{)Kv}Hc|L{S#hPHz8so!yw3@_;p|Agge|+>|Z=R)ZWxh3gsLJHV_CM-2x85~yzj1ODCJe8LMaoMCP8JmU1w z{9a6J__jhq!_~7u5b92MNfF#OQ&?5p!4iGJnP$Cov z1jILc`}3WYj6Hpm`ruTB=7c5R&U7{Sh4aJ01Ym4E%o<_#=J!wW%eQOBjU9q1q&xcx zLNOUlzq8BXeFQEmuztrc>;?@63PFqMfWdLYKz`J=I}up#h0FGqUj{5h)P!w7{j?Nj zEvpUA{9@Rq3Zu!_2fi~BDD$(hm~;(HNnMw}kS0H^SEtM0OF5W8c9!2i2ULbK`fvRl zV@UZ3J z;<%?2==EpiK^T(4Q{^w`&eya$RArNj$pw&neKWcY z1?&>(cQ;wV{=0A-o_OhCxccGy^YJGI;AUb40~G!wh8x+<_K$XwA($;|F8B}bSQjFn zcJ9}g*dMMs6$R4tp2fZHVOI?XTmMh;-VbPi1q%SF0|590R01XlCF6cGL>}4I_Qe@c z#U8r$axwfR$-Kc8Sjpl{4ASFwz`C|vcRhwUGh8f~m?fjRnD9$kgKE@Umo3~nZ+Eut zG`^31wcs=RFTn{I*qH3G}h;$?> z5edI67y*8VmiueKg@!LM*|FeALU(}v$t=#etTg%xDP;QX~do z!d)?2iLh_{VBpSkmFYfV|Ng>7Ct%M;5L~#*cO|kge_d`y|G{6KedX^g7@*_nD@lAy zqx)V%bDLWf=pWLR^)83Alw>$yWbI=V(WA?_Sjp!%e#}$%mXu=zDqO%^Y&x=d1@L@9 z$5At80&~cN=c4E`!iNpX65}x!0>CEQfcC=nM+z33u#mND*#AO{(a@$h;!Q{B4!Ip@ za`)*d5+M^a{Uks&b7cZ_Jt8LEG8=plT>;&(Smp7olWCPhmJGiltr*XVSnq)xdjlVCrZ(xnmyJEzAvf#=F?pFJKz*4EPhE7C^8Ev>kwY!Kg#uRDftWNfIz$2(4FHH62BO3gF1}!3=E32KJ|HnLRD)6hV&*v)dBPSN zS3o^oJ(A`1GbmOZz)=v>;@@urzzD#BxIKSB-(by{`9$mRO__O@nRcj>+olb~iqQ3& z`WCPJUP4!%eU@=xwRRN)LFm=Wl8cKA4Pr!4L`D!@mSj$Y{DFdzB8IY{JvV=*n|xS| zwP0sM&IDjaa|up1UmA;m6w*XnKHA?y#cjK+AMkL#AqzL?6rzMmSFLf=Ft!h))7T+k zLnw;|ViK2uAK~i(03byJK+&V@<>&~604Nv+z#7SX(Vu5mmc}4b|w)o><92S(yJDEQFBYlb;UkA28dvbW5@asu{b$`G;4B(p*_yG?< z)&uY~0VE6psKp&PHpB=2#J(TNX!(?1V=?j56xw&<-HXSX_gs9-(B?#8x*v*AaUbe#?%b@kqaigMVJ{#IiPZb_&2Yq;n2eags!EA zI3>)VqGkQP21SEenvLDSa`*Ra<`u#lnkp=L0bsrZgrz2(_6|4z9{`L&z)Zt%n`Yby z4VW0aNQiY3Pdq6AW6Kll6Zr55Bn$d$I(0CjG6$}a@#gEeLWut(D)7J_(<46Bncs1Wu;S>E|&(xaq^iBD1 zX%N{FMkX_-Wzl*UXMkM5VS=2>E$X)0n%)$qweksEs@PUohFKG8(O9*Y`~~y{q}jQ7 zXX3^{;$_@wU=+3-NUAJ;QT*5-t5^Aj;V}jU_yUc-yRJZhBC`H~2mt{Q7#4ar`qlv{ z9tgw(g8d`F0roKlr2%va!3eQO0et1leCn>dE3k?~{IRB3htg(Od|5>S7RH=VJ`<*X z^)xuU=}- zHaz`~s#eLlgZu`Ul0u<=$Ak}nq96tW4hjPYR~>W_;sAI$zcR@@H27XejeWg#S%d~k zi6KChh@}$pS}3Z_#n`bzb9or7Prv{G5E>!zLb5f zF8OeE;gfjZ<=R29K!k@HC?YuJ1Pe3@NXG#SOqj%Q%$_`VkFd|h%#r?VKB{Y-hge90 zfdDwr=C_<-@z*Zb!Q8K55i5Y)A}@-2+5el(@L|98R}K;Aat5&BK1?DN=KK$4jZrq2 z%jyt9#t_2R;61y(-pE$hmfHsIl?rSejRF{CD7xWMxKWog1=7}dI4@2x2epf9#HL-; z(Ser2Qn26(OJK8Lqf<_@KwrxbbiX%|0{jqIasnq)3Z$nVBXZqkhOa`bu2Z&q&{;>?IDh>7Zk8qav89!Uy0PQTzRmL=C@ zPdIe%b?!oKcl_D>DfS2+VNCvi(Yor5i4-WK z1*I0W_ZJ8YDNeCUd(Hdeh!U!=e9j?@cEO}b=f(d4qC*iUz;dxEQTYy4gjA(V0;nex zK$t5DkC5X*R0mQc9IEvi3~R@92GhQ;8!-C!#`3~-pV?Ea0pP}P(2>hZ%1Hwrta`Rv zFS6Nu!~OrlZ(f%wSTcfjLr{b)!xk*aNl|&5j4WRZ6F@A82BLyiazdc&#X*7IWX<%h z1XBWC2Ep>sU?;|4Qx$N_T2_A9T|wwXqDKOPoE!wgAhb_|XdEvzv_rhfTY!hl#lU+q zqHHFVsLU5O{J-j(G6g^nLUmdZ3|AE%T1)A|V^ha3DI1QgC8&2?>Q&kUtNL9KLBQ{{dIo`@Xc`CNC>jTllL>jF=rKBf+w665N`d@4OqHrk zA|(`AQ#)fubdAUb%9Ad40tg ztD_%W<)AQVt*SEra5kT&Xnz2}7$kaN23xEszx_YW{W1JMfwv}pOyBtZ1$zb$siVuc zfe1TzL^MKzNl7*}{q`9IK)4l&?zc_l-a2*)LIOxKE5CzMTfLH+#&J}@57J$&Ng4{h zm%gh!NDA!8o0fr0<95q<(1c+`#8~eNtvm&q(*5$SW*N{jgCmJM61Y{E>XDg z>-XnxFJVB4C^&Vc_#4@5XX}paO{+5ogZSooh3I6zyUcY5-g?h_JUKFTCIX z>?9Xp;>n(G5)dL{Aps(^&NV>fz~MQ;%ygh%_x#NLI?4gV_ty`HgvD+vz1|j36rvi| z_5a(k@ew|vU!MHKz86pxS6>Ez9-y*}&TbOMTrTtY4vLA!>SNrXf-;Oa7l zs_S1tnh?a*pAKI{!@@y_-^LSfnFtXnmP(x)n&4t#OYXQkFiPGB1vsuk{RBVPFSNse 
z_*!CH%^>!{e#97XA{7ONR*cS|&;wfa7r-O7qv|$ISyjvkiN=ISFbApvXkhEAq#kM@ zZcuG*n?1jSX?Ri~ZI}7Y9M*G3HNu2n(o6o33;+V=fN5TbN@0(`tFVy-ai))ED=i1a z53yYZ^Ai(5t!N2o_=l$W94?-^rw0qPmJ;SWW+>s$_McL!);K%f|~l~!$QdfjEC$)Kd9B41BJx8bOP@Ln^6MjptWqHF|*KT*+W2O?MQn zY7zvcm6Vh(20JEWGaL1pM7XrtS(761P9zRhf|M9{-Nj-S7xegfiTaAx#+o8rS30b( zgqs!%hKV8^Fvw#mYkz@?3}f*f9t~#%Qln8Xxr6mG8!pBl2UjNA4Lk?(t4%TLK|(Ux zK?ufQ;4pI%#UQ}`ec%Pg0l>o#=Cz24ikBj>ftq+67G=i^c)yy!7=byF%@QXAfeu8f zjy_-z0|;8+_r5M$r3c*|x7UrqVnm21LjkpxPvef=-VTeCh{#yoGz1W$B=%;}Fni7^ zfx&Q#gR2Wy^+DS+5m;IzU98#WE#%_3t|9HAO*T<@n;juUvHYT)6*Z-mz5NKtAYZzE zY61mAp?jS{Z^T)B3bA~J9XMnbP-~18 z1J75;G@r5laBRKL(Rp209lKCgKXmq`!(P$8Tkc~V2yP%R>$0Q|>n-E>U{=No_CD3D z0a1-ZmyASi;bdy*@VIN1O@%yf>T``@En%bibV6uy$0mN^Lq?vA ze;)2-8GRwaIGUBBSuW>n$9xCzzpGdQaR6}$Fnu+^`rHeL{UDKa9>?zhVZl(MVC_K3 z2|}B}>%qV@2Nm`s+&PgsQozjsnVc}VIKY^#6!se0eLpXVaw{Ja9%F(Dp*jqK8pIw( zcpHi={1R>cz#On6)Qbw4#TSPQ2{;ZHM5I{D9NK*xp04Ovm8($qm^g@uP&inG2Z=CP z*4f}32LJ`A0N^jZC-?#3c;dmubovN@ln4URojn#LHR>AUkZ2}YD}4r0Sxf*cxuct) zijfEdtdu&_KxT-C#ZSz%yN9OWtHnQiGB zy?;t($3ogb1Tg@=g3Y6|JvZVBL<{^zml9sRNKJ$E5kFxP(g)Ylr9os1Sb`(bgnTX# zkmABV# zxrA_XeqjbIfTSKmjRDloS~$dm4l8@L9goaTiGWUTnGAxAf!p*dTZ_S^W;q#(tnDsp zMTrVG7t`XDpd2hbT|gi-y3>HE&(>P(97ZW$0tK29ofbdjnJ1QT$g&!HT*0ohW`sNq@)=Lo6ek%4L214)_QNdJt)%h2?6#Ih715F;Ng&lVt| z6Db|Bam%FYEbLMob{{UZ@_LZa>&u*1z3wM!wfx8Sz}(F{35Eh9hWB|h$tw1IF8xq|j>e4IQ2U@W^G9{IB1d|D{(0hkW0N8fcaMH0HMk z7>j(qve>&odJSu-zB$Yy^>N2)X4(fIW5K}B2DV@9)_|2|Lfx?talLKwfxuN34oj$P z#zufHCM7NgN4|kF2Zj(TqW_93!5l&qf3zp;3}*qh5fEQ>(poG61QHT;wLtzDUdk^S zaV=ge&dluoVHp7jD&*y?9Riru#0M)c>aovfzJ#l_%85avT90HXKRAk?VqYQW5Z4HL zGtyG7LIEHsL1qHchKx@=vA_~@&W(V%kefYb`omV)5`$k~dZ>-&cXFoHc&tAu!s zdte%O`U_RO^^#Md6#x)$90)o(LI+MJOnMQfI0lG8rl@g#zIR_!-2-?;+t@lJWf2`hW zPa(Nv5ro@cn;SC&5!T2-ihX$F8FWxi9}fb!4}X+}+sbp0j&ONl+;|P+Ot(0xy*y+R zcJIR9*>1k){@GTZVsI1?pdw;1FsP<;$_iX&sm#sB2C1!fS#y6am8E3g4W+ z<09tomrxvhID^mvut7k;Pp0OfVgBu}zFl`b0FWRvLNbp4c^B-#*n7`z&o?rOd3#30 zUjhsWJO$(?AcXt_*y8Jl<*`J*2A%@y2ku`BaCJ2jzQ(8i>(Mg?aEGMuOe`5 zvwI;oSh!K|4i|S z{k+t2q4|aE7fwZ{@KG(!9A#n>O{bqy`6gWdboMP7g000esA@WEDI_!9R z-`6)^ml{lS^22&x!rm%sA5iA`d@=7ghRrW7Jec>*;Xa={xO=7R{Z)wK`KG7B(3ZOc z=jP$RWdBuM*d%$Ca^apVA}$nt*MYXX;FwyyKMCB;(v{W5>5xN(T>cuvIqiGCXR7tIOk<^74ca6XRfyV;3*V$0w55 zz+PNB=clGA9kyNIZzMbK;g9mtqp2E*1#H62&_)n_7i;?B4d%=(kq(C#v+USg0R%oP z!x4XBxkx@9WuRyuY95vbD?Xs=bh6ps^;-u}O%c#3M~prH1Ef!CfuMXty`KX1pk4~f z62d*>m0l2*ASV)5hXlxS&@WbJBoLyvpXMKPZCa|;^JRd!3t&|5bb-Xk?%=qZJr7y# z|AX%^qtBHO?gvx!$sbt}CMnjyf!WK0zSR-s_6FcbP}EUvM;apj z6Wq4(@+`*d9!k&=r_SEHt zDD+~c=fwJrXWR}!lf}Fq@COqj><;TY4qD2p%+f#*;Y||mFAy?Q%%8z%;^gKH1`!i5 zl~Qi3puBilVV`{d4t{?FxRw62W8E@S;tm^w78eru1C5li|6i5wT$_Jk3Dx2&v4b*Z z3nENo(M|uj`UD&uGAtb5|7k*=2SX8eTv$YPJTcB=O!a=eq#~E{`D8+5h-K8OfB93f zc5vvi#~dUwal{a%%^pSYK*hWtQ67yN*>?KWoBk-2-3vp_x~)`s?y&xP$Wbm0x?*QEegTDE*>^84@s!;+avyBP&?wa zt!Wl_mVCM1@%CQ;Brse6k|4DL5R6Ha5IzSMmj}x26eA!66aw7Pkw6d-3LrW|M2`(? zt6Q8R*Mi6LDVCT&p==t> z4<#+z)*HTQ*|G3_9JYIQ40>M%)!_*k@3~JE;}*iB!oo+?oPT?}9gZ}Zu)v{#!MsY% z`MCJ+<79^z_5Pf@;E-5g!Uve67G6BLe_;jvyW!w`JYhT*FPLimE)rhf88C!>u*76= zvUceujC=bV#4(w?2)@>|KfVJQ*zETK>2*&Py9=>kYJ4OAfA`;2!eNKsFhoU!eE_W9 z+EG^%!Brb<0?Yw?2?fFg6v6AHgjDUz%|-SK0B{A#((AQIQeV!*$uJxd69^$3UZTUr z#GKB!dMx05HDNC$KNlQmtJpis`jeJ&KQ5tmC4~}#q!5Z~Ol}Xgc?f61x`pmKG&+3j zT>~jC13=M^HKRwuI`Ybcp!(-OiWQER-_*hACg`AXGp0H}NlGk1279@%_siavWd=zt z;>P*dWd$n&2g7e8I)G@%UgIO`62nIPH6~2!8yC<&M=xJ-AO#!&J`f;OhuG0oS0p7Xzt>hTI8SXaHB_9dt)^gYV#&R@313*NoRtchy{yrkId$ zNk(M??5vtzy($mI!-bk4KwZ{)XP=$`n2#>%Ir902loLA`B6e`!B}zoR(>`d0M72Z! 
zb&ne-f$h#^uqx!aQun?&j_v<8uZ!f$ARFr7;I>o_kUXX1%kE*O|K=wlB6oM5J792J zWh`qj9~zU%Ag^Z+`#i+>af`PglFyjMKZA4~v1FCpI-=mf8cil{D}jlMkk%zw`|`JVzbo>OAfHd4vSL9u<);fe8c>-^g~m?P zUeOC19lDlozkU=&5a<8&vg*NrM-C}b%Er4-03^G207QY8>4Oe)rKdilF;ZbAGDxpg& zlEL#Z+x0e60>Ko%_&O{S9Bji}UM?GxsojHt(+NQDG)Z_rz#V)4i{1Pe{k~y5g#+iJ zwH%MPGoCpPf!Hji1|=6>@yAIA-u$W1SP%q4V7wlN!5lzLcq0%Lhzitb zl6*W~+nw9qSS09|-OZuBZm4E@-&mQgB7oSWS5`i3XAs^r&zu{BY^h}~=;yZfWUBZf znqN=1U>A-YMHOBh1s*XWN6?ml)3L08)-o0sdA!xY;G?2oM+Nq?eT3vM5v9MRDB`dO zkyrNTi3QQsu1_FFBu@Ef)&&jH}OL|e|ft6^h1#l6>ot(svS-;RgthKbfHjk zXM^nG%D5ho7g#-R*y8DwfEtX{@V}Ee8#_|_nN#JzU2?96HnYP?`^rX>$p;YBCVCvK z6zTUK#wN#3zu93?937WEgRv(Pv<@*Nk#;nHBNgZ!u7eQVVG2fu`>|ld zSSNmL$V489*k(E~ybm%joHHV&vYkaZEwU>hg1%oqa+pZK-rmRb{tN4E_qhE0o@AkH z40ss)L-9q5b<;O~{6R4B8JC{VnNSreKnk5mmZr-6wW_9fWJL^Q`Glc(cwj>Y0Fpp$ zzu-8HCt2B@&5?Q{vp9eIyAi!>1FTBeIIIUu%e;znfB#Y+)l^p2d;!F@PKnghSQ)d3 z;3I3kA@Fi!M_u3?O`=Z(7v)pP@gYd)AdrsrG}&?C*B13$5RMblz77H=Ol_2gATYaP zu;OJ@IFTAR4y1!5ucWG3V2XEb+okGrn1bJL3N=GX+Mw>+`%c_7olXS)%*? z#exRE2PDY068>o-W$c0XCi3qL0Vp;QL(DW00tBDHEj+&0Kya)F1%O}lxol0$5E~+) z8}WSC8ABHpQ*tKO)pZ|ZK!(go1F>V-AaDUDA-$MJtMtGu`~B|!vfE%Vz%;{SGQ+te z5P&!Y=@6R~%8e2ZJ?6fXH3)I1Mfx#G`v&DkH#Au_-gz1E>KZW?xrojUM3~Wu*R@ZMC zS&d+D07nW@A?1&3Kv0`vU+2#cemqJwM`z{F;tx*M{xJ*gLRtoa*t&~8LPRCMXvq)S zq!Begr`Qz9A@L3cLhCw8FA6J?zWoVQ^!XVq8OFSk5u!iUxo%$#LHG)A+$3PHL5svZ z5#3TMiE_)!CLIYY^a6e-`Blk48#I8B0F#>%9uSZJ^to=>ATUTYJkue_X#o`~mRoH) zjHO8b6^g&(?rZPnCsC`WKa`G0@N}|!K(}TWP0Yr9Q(Zck>hAx-=q#ROiKUVb+Cd=x z&N$~CXf^p8PA|%$AjA!uyp|w;lH}%8t=<@ z>)M;x{EMN?JUAi=00Me)uBMl6yBr=o!j#W0-_XP>8ZG6_IQPSXOXbN+cp?J#hajdU z3#MplOz2z_O^;sk|A#-!^5n~Ii_kUsAktv~cU1Ymlu(u1q4@|AAs1~3-$OE#Db`@7 z5Cl)LFk9~)nddW&9)A>G;V<)H!tmkvXsEerH{t2J@=fhac!;P(xR{K5Qu>$+vB^O# zgF4p@94^n7%s2&t#VZ0=5c>rHZg1SQSSic+d=FxcFSAKO8AjQ7oCATB98p4>p-BT3Dpve4ZKSjzbP7-c&yJx+F7?eEo9<(TZUd5LP1+X2LRS&X z#by6p5W)wO9>+KOcxYW)^zYyR019*=^GpUgL`Tji;bqDugz^aS9fl+7sGD-`rduw6{CDa>-kcYIc=1d<(*H9S2#9HXp{zJk`_`iN4`BSe`a511k@dctoOHR*-u;6juid=GI%R z4IsQ43M3CA1p|;ySm%cE`YxUq!9P8e*aPC=iDh=eENW`8=dsQCG&@>Ot+Tk`^UfUWx5}}&s zv+cwoEcN^H{PAd@fKa%h#W79b?N{7>^Y0^xVds_>qN*t90E^%&g#fz+KBqU!!S=Hx1#oPNR}^KjN8i6i=r3knuL*0vn!a73g9_reefUGFkkL1YGF35vxS9HPrCRx5E{_;`#13Nsx1C%;inxPWUADpRk2Gyufi9;gSBNfvf% z#J3n7B4H1+iUutPB{eJ`7e(vy^HO1;`&;ZTaB{-((FVTHSUS#H&g6E!O#{((s7}X$ zVLVvqLL#RrV6tlLW%Vfi_7pHc+z5mj-{tL!ZHgWT5tGA$Ou?jm#SF2qfq@xwkEQSe z2(B7r_k5)2DTDCs!MzR?NuGCjK6-9PS%y3Jf%=NQ(gUP1C%!zQC>jT#dGcR32?C>! z9cy?%A957wCU7AHAaXrV6l=U3zXvbN^f8B+Tq-?YUMt^S0#L|sNcSwT`!;dla8a`O z69LdnQ1{Om3NqeP9u5?S$H(DzF?N~xNb>-QI1vh}XGmNaB3UA*4=iUUy!BQ;<`5vt z0|IbDCS6c!N;rmVt`2N3ImH9o@aTLTRw0b`Z-C8*^Y8K1fa=YmSzAMbVE}kBR~`k2 z2LEMkDs`T!!~K#w!VJfq$#LzSLZ2yGZds0PDlLdJH; zm+@Y(5TzD3b`F%}8GpduTNf1u!Sj+mB+mF52qwQKC66e&291MY%Vs1vSXl9S^EF3owUGO6II{Y9Em&24JKEk-`(vH{>c&CMS%<@C*|d z4js*a;3m%VfPHaX*FtefhJXS_ARSl(w{UnfDdR1J6uZICNE}Gwid-L%sfPe&Of|$p z3ISCjKJg)9I{p!L-%0aV_%8rucvr99?;nW1%;Fovt_K9zuf^8_L?IQorvo|t>xf!< zj}bDR3m;7MCj6eCixSC;uPt>o_{lhWwImkqI`AENKj!6`W>{i}f(ENVAcX*3j1eHY zVzhisjqs^R5SQ|=h=F(E0IG@~#>}(PmXu|rKNsmEMq)G_N@z;H*M$xVJO>J@RUbt? 
z0;POc6|xnI+*X1fKui>UzM$e31E=S|2bYVW_U7uuI~6~`=zGqlXjHHIpxJwVXFH4o zCvmNkW-1(hDVo8KD-DOACG`y{aGn;azrKGWH^I`tJBz~d&dN_0tsk5JH+&N~98PFF zkZ}O@mg2us=QLj8J~)ay1_^lK!S+Rqy?E|32;p~s9VV7sa_Q#*_LBm-{r#8513*q_ zT*BUdR*kvLVE|gpv8}1qW>D0*|{%RfG|S91YQB$7}6LnmLogPQ4s>=WLGCI_n1Gsy)A-5fZ-Q+ zy)snF_@Iz%G9*f8K?<7VV-($aigj1hB z!NHA{#IG=*SATrSlgEFT-tf5tj|`7!UjKl{bhO21kjdHikqU(>9BR`SQ@Q4Wqbx3# zvy!v-_NXvLQ*JlL9l=V_s!x|@RebygB#o9P4n5q*{qsI^CS&Pgv zz@XOh74Re8mt<1arXI;lEdjFqOk>nyQEeUA52FFpuBoElSazeSgBDKw3{XpHpvJ5_ z&CubMW`OIOBH&LFAw|G(jl^BVG}i?L7Q15ce9(+T=UW(B?Pk~<7b5%}i5MdlLUogm z!H6{)GN^c23v}o|0vSpvpZ0#X(wP0L6!6~gB1IBG78g7_M5RR7>( z(R6^F8i6S=xPXHQpO#!4tb+Xj2cJSqo;j43{nRxEnC(O!UcdCR-DBd#l@<-592$Bk zP)A%+t_<+H?)*lhmxh0ouDBEO0Y{t$AjKl^!_u;-V}G$3HLkMztFdo2@o_xkWQdCN zeK~x14*;xif{6^(gtRSQ8XtawvClvR-+&Vl0)f~WZ??W)F#Th<@mBv|GV6G-C5+W& zo3E-#3Nc5;6i0kCULF&}NB*Gwlq4=3Q!G=`%j9mCvZ@4CPLx@mHB)azY88Q~6Asxq zu`qW-Qy&S{0Y5k7sY+ZUF>nqA$`pk%1XoLifl!mmpdZ9TMOM|HIB-WHm5nOZ*Q+H_&{HP{dqns{Z&ttCQt4E^MCYp ziVESd_FEOihEspldSHWrFxR$z!Fe$6x`Ls=qnQw$3j(4eGTVFWIZNu{V$?Lt>*U5g z-}$FQ)zXA1(RiJ_7Uw5cF(zPNF^tgk+w< zUTZDYDqcm)Rqt0q1Z)~T8aaF|KEja?VAwr_CQ$y%l38yBFfWe?j|C49Ucuj}ObdXp zPYjCJgJ{e6M*qCtv|&L+EMYNn;J&c6a3U0N*vl?l7_fRgE(35+25}MgR3E=Uz6%1v zA*O3fXdTERp#GSeJWDGMMfxukwk^hlL_537D)krJ7A2XeVp*xMc-ejQf*Km3ut;gl zN`atwZHt!LfY3IYK+rXzeIH==K!9*b1FMtboKxWe4YlR`@#i4Gk9c%cvR32Re0iai zRwDy?5di#-Nb^pXpgAKYCdD^qUEtA1Lx@M9zT@!&L1QS)vO=I-Ix2_!c!t_qnn&Vr zt|773u@1LXE3q-LdvPdD4u>-SUHkz(u?4|ILy$()^8=u&%i+Oi%Y6pH+K=B2YmJ&* zx7Z^c9u&oH)p}1_E19Ig=w{iJXi*zjy>6A%<QBkz;FR12S5{md@~C`>H-95CJhC^b<~qEr7HbPe@d{g zAiASQz5ZeGg=r?a75njnc8ghIO&jwIuk7$l4*@{Ed>WXKv5{p_V>Uh}5*Ue{4ig6`sNMV}M?iFty+yiR?gUMq*nBu}KBZzjSbs zI9$~nQ9Rm26)Y}H-Ccak6iNwzS5hT+8tkf4uh|w-iqf`e15*(d0eKRWKLgNv7vSjZ zJcm+6gJxl<%_tZQTvdW_Vg>l@oSQ2(zC3EXvgc|SQU_)GFMr*?Yc?`AZ>JRcRkH!7 zXuJ=$|1+=_CN!eWqnza6&`H6@YV1Bj?^8dR?oyPipuTJ@2?fV;GvE5OYzo?zV6{`9 z_&TXHgk6<@uN|ddEis$Ij_pNmE4yZC{iQt`cdhupt@MIy1?bM#s5m#znq1ib{e=5W z8a53daeXblWFar_x`0iG2Fc8Er-I)mE;U8Qd5(mVIlF|^3^dQxSARs_5 zJPr$m%3XtI&Q;Mt5Qk8N6IkbLO$3NO;TgB(_#^>v0mFuY&}6v0eA** zOWNKynbR{jlBnYvO42L2wx80{_RHXyXcj;33LFFLWV;8 z$OkG51DfWZ>ARq3rtc+xg*BF%fa@0cx4Z0D75Dng#OtI4@hu!YgV;Em_I4Ey&g?GS z5=DNiZaf@MLoApN5;GK&v^LfkXrtCIeu`gEsp@%H7-fvANOy7Gx8}4qpVrf$0M* z134IDadN0HpXcJ5qCQuZufm`nAFduoKEdl0!ACI!#Bs((4dljz1%@EClplI4&Co@D zcx9!-CR#&!{k&X&NwkwCGoV;05B)}Y|b8Fo{?wBk+&E@tl+eAeS4YK$BbWQRb+L~wHQ;5a46 z(-pn60%#}*SSe)1_AB?5S%ut&9|5f7ZV!0=pvG&!VtA28DX7_m*cb%~_)ZomhdACs z@7#H1yjWNo17Kne;e2Pka4K0du+KQ=N;>8~V_&{qF%ctCipC5SL4uCaS}V&_)%c6pE;& zt)*67Kx1E2956N*5k6>v*(>-By(GC@4q_J{%Ig_!K_FWS#D~$0eEcIWr605tZQ>-a z_%}(-f2C9C6|={xZ_~Yxh5;zptX3%8SKF%ZLaF!^9)-To@Kad2wd%Efhs+3A`blZ~ z^Hgf;_(52vui$XHP_R*BP;oDyhd?LF6bOQ$SjwI~9hgi@p;mwkas-+XaB3+>hau~= z7Rjg5Dig*@4q?6n!WDz;F`N@uwdoGznU)`cpcs$E>C>p@p83@9G!w11A#HdLUcMAFQtBKjBIjJP|H55y;FCp)B zc&yE3|CtE6OM$D{js3&^5TsnFdyIHODagN2E~CxyO-P-a^X1o|*-VoN3-bV*xv%TJ zK2dr8?CT#F;6LhE>Z3i&+38G+!;DXKhb@ahJ&6j1E57ItnP>|^3jGhCI9!RVK{#22 zRQdD`gJ94!cvd7;gX|rHnE@QYfI6rgc+oGs97*aaOVdUGeg~)7Tj9ezJfIz}qo@9l zXDS8(i`~Uu+`U0CECSp<6bu3+AN7u9jkq!*Rp8Om9}EE}ekcnO?VRMVh%*!*NTwwM zQ5h^ly3^%q<#fSIbH|~TuznkaJ;B>N7^+M(TVdCcY7nz5@`PL5k;=QhYfrF791ffUU519N%Y0FjsM4$}#puhnIz%;-FXaJdl5TI-% zHPvl7LU1q<9ka1>P1Eps%Lz!K7_S`PWppfrMihKLa^FpW5^$h@LDV6iHggjE;Mijl z>WFCCD)1iKq#a_F4Fk|V4|ed7fk)&JOYj@Ty)PYc53<|P zBMlx*3cJpEf}v;Q`7mPzoBO`b`oV^xye1~OQXiLhOSm~5y~|>;Sg)d&e~cPPI~MlK z9CzIOkWd>BZT0)F$F%eh!$ScOLMYpUv)nFOZXNVTLjg=ZF5y^K3XpJtqzVb{QCRN) zKeqXNKf~TT2;X!n7Gz+M_{1_fcyHTMJ^1(7>*yprQN)@{!|G>QXF7q5tC;@rEMAHp zK)iM~W%uV_ma(>($ULY;_zYStnWP 
z*Ao(R;5juga@5ScbuvLboZDJ97=9zBlRvmFi8I|c^?m>V1FIqRR0h5eWew!>@g8{K zgI%(6#^Qdtc6z;Yc*$_9t`db(AUNn^I#-p`SrSkGA#C6^|C-URn z#PNIyeV^B2KY{$p{GNOz-`wjLEMzyXP#{OS5@lbUq<5%%t$S3xRZek-5G!Tv$NM2 zvXVgb4vij-9uVRLhZF1ixX-A$L`J^TY^Du%gsflqjYurk!Jjf$;_IWSiCs!~%QksQ za}Z2H;-Jn!C13nyw#TTYGwzo&YZZh3$RJX(%ozFN^-wY2EpNI2;(*O8UejT+?& zYY6a06>e(`wUzLRANEeuGa_H;a*C;Y9SA`*568+!bUD;CnD`V>lwI+NaSPo89{Y1$ z?Be*2J(C9+eF}DGXYqnK6^#~~8yk-s5g+IN6!tR5f$sNXvg+3Hu>x7M){Ml3a6%g1 z$cCLtS9V`(#)R-BpF@;`7Khi#vTwpAvAOfRQL*n)gaM7;myGY_@5I<*>$Wd>2aLS2 z?shD6Kx1HKtG%8iy$Vpj7%h?x%?@EIDrZrl{8uX|bgXLM(4`ewX+&|u-8WozA9b%k z>3BygKPYNDOW0rG1TxWAA3;WuyL}olJ&e{lZeX7G{;LqMrx<3M?R)JYZEg^xmza`S zh$D^H;|}+CckGe!-{BD(=zHcsztELjlZ+6{RM37Sl$HpaEXdbcc%C%c50e=5lCUjW zHvYLI_kCrn+x$`(b^JFf+>V)?KDSJ?w*{C+@dKwjp;e6wjV<;%K_WQ#fW`ICYe373 z4Cf?rPU~F{AowHWpu4}pKfd?g1)#vnNjvP_-+$w+m);0l-f!{d@~9&}?XU4?E>#Up zC6Jwn<2k*h!El7Ucxs{9fuUp?^xPNI3=`>k#Qxe{7$?Q#s|{+J|+()=3Got`{E2Yl-C;; zkO{L_c${JQPQ?9d?EU{8W?-IN?j2J)@E{9un}_FW`h%J07U9{JFy$!`9JS``eh5 zztyN9B@S3jf`ZH;w*UB|$$CLE+@J7?o2P(g_w~WFNy+>nR#zk?H$1i2zKsLy>nzg} zVEJ^s5;2j56Bp#Xf&v^fsdx6@FJ+}*C7hxY+0Z>66eyx`_DLY&qSS&m4>mvlhr2&5 z8z0PwECe{c@L&OZ0PP9I+f>SLZD9upq;3J-Cb-~jt@h&4_NBZ`vciIMV(qM^w(D%Q zo;TnPwHZ$S94 zsG^5?k4b=Q=pb$**3KmSCx7|80R!uxQE+v2Ki^)^Jtz@@(7{#=4xui=*|ToK?@7on z*_=T~3o#QB>2h7M*d7W6e*-@)P#@rbf%u?!%8;yYEgw$`6h;@nmd!%@D9wv}NWV9C z`1UL&4lyB6Z6rS=DhB>z#cv00Pu&YE3#K#2fxO}-gn*dVZb2djAp1LTbX~xD!?H^G^KQDBsn)NP0Q4D6 zh#gU7YP8Efekj2w^B7X@f2_Os0U0F`oHTvhtz2DN<3C&P7_Ffk~V4`px2P(4=2u@8rpy zh>T(}0WR&+zIOg%H6d}Z6a)>4SRH9_JjLO~X>}C6=Boq^n9oE435qdX`Cm(S%{@@j z1|U3D{0^yktw8IRU2C{T55WW-SXo*Jp(*|9#7J~%qyisTSD7?WIh(8;8bYiOFtWqb zQt?@Ne2&{l_X}m+0_yI>6{KK40l<7hkd`5e!SBF4$ReaG0p=nwf`u=$E#3q%fd&>E zv15{n*!fMn@iWfdGuiZR+o_+qQyK3a^Dgc#gwQ<&prwKUUN~1Lkdj7UT}D%m6Er+vfp>KlCvR;`9s* z#Vrl7j5zdf$@2UH{;ds+=pP4SOd3gaAMzVu`4~V+Yhm;~{77I|tWUV=^JVMiLn8IW zo(`<9mm<2&o|vr8E$&u7k2C&Tn7}s4^HV|7(Jc2%l&49 z0brxAA{CZE!7q946guEG~o(GYHM>hiA@tS12kT>e4rwMFgzR4)Cu&2qKDMC+*$Kzm~1 z!;pW1^PG6EExfsr{85HQWsFwmiXUtH%!F9RP4-fQlePSu<`(+fLOs@YtzD3 zInwa*7m*rcjc7I!VJe5gOBWHLDAEiDl0?R5gmI&H&@>K#rMk{V^rYSND?)t_?|d7K zHV%X+#Tcws#D(ts;>DDSNeAeJU~MMRgCphf}mWKt20vprI@7`<={QC z>37FH6d?##(P|3HT_L*jGj6q9Nu5T4aD}jcXkCMU6oUuErVsgcb_k&BUH@>9u*ftLbVnU$h zz1T*^fWNR=V`6WB<8-sg+h_(oW2-O)71bxUbb#fR!;UdXUN061i_~1Y2LimTPnu0G z4TcjZ_lx3#OS1>~FA!rM{XyEASXhuWP?5j%|Ae@YWwa{C*sNa!Wb90?$)RQ6fPC>{ zLg5gd3D0UwTvj2ZyLcoeg*f7j6Yc91H0{b76xGPzb48*64IThNMiBn+aTOqoXe93! zXP0i9yH^-fzTfsene1Gb?<`x;G!H=8iNelQ4TOkW! zL0f_;J>uP~OvM^ZA8zeOTXx4Y1{>4K4@2?pMckFAwJ(*2fdJ?fK`_>C1KVAhFg=>! 
zc5<)$^I)6+*^tLyh%h;Pb@S~hT|k5ZZH+-TKo-G}se|kqoqUREiqhrf8Bp+V6!;Pm zJT7~z7?-B;5UvY@UsnGR^_ICMZ_n9~ z&dppqK8j>9!A(gc%pVTZDahY2d$cm#+w~D?Fe|2DcF4WDXd6vR&0ylF9a%T5JG)7e zm%w-)wtgPnH5E{y&h8#Ci%g~vMh@@cQK034Xh+C?GX%ea1w0#f~)=ZU75+#O;&krbP4Zoj_-0Du*Z-uK{0`XAhd%N9I* zw}ZGk3o@waEWxc10ffkH+hQDbaE~RT@Afm<&~=#z8f<3sV+TO>H)5_Sb~_!775r#_ zu&_i*ws62zR6?fUuEk&IEI^A0a2;)?YOhTHuxuYDE~pL_6>u| z#<~yw4s6&q4>hpSkLwqgQ<=PaGYUN2lf-Cn$Zmx@kpGZr6`(-i@74FlL}sy5B9=dH zoZKO5VjOepf0C%|bTk>)#_0#2<>(Ed2vjH-4-#oXey?}M=peEazHLjh)Mjw zX(`|k^myTUe5g2Ge_sQFj5=I~pAyR6d{`TB&HaK#T_P!21Tk9PoC* zSn(ha79w=Sf(RY)U;#4%NWCQv78F;=7!Nk`B*sD_eker#CWt5TWY9DZDH4&TELn3B zVD=2ewuSQ&F(xb5yXFob@CdR-(>No33}bF^)11hw`vQqmB?J4hSLM6#AOK%rLWT`#&qv%BK-DjG6h^S4$iNWG`qP>C(!)=>bK_Ac)nBd}8eq4m{BM9}7YD7l?>o zd-zcb2Xb93S`#(@D{2~IP*fU2A*}?#u;Hv27_GdIsHezeu`_%_*bK3lAUy&j1i>i9 z&7O#RQ{(6{&dOhVved{9h5nW%iQ)4{cFacr!1m6YlI)AYb zY!aIXz)DXFuyAfq{+g&slQ|yRZO%dYVm-mjN-R`Fu#5)>`Hi|LhBO5rYr++PP=H8= z)nvW6roVA$miYm1++!}B8iXNGVjc!dly)8dAK?qu65^^Te{xFzN~*vAeax8Wq7EMf z`S;}@zG&yJKTcS8fbwL@@O9=l?~fK4R|qXosI>lghAUL)Wkrhbv#u;|$KE^X_N;#9R6b-t zK&SnvMhauSkn%@;tbWejSGNcS4y#-7&p`5>7w;%mf+gP1ztKcz3=dFg3TN3tFUlAQ zej6E4#_m4)xkbDrf=W=EG(`_!?76K+9Dkuo4wg%+m9IdK2(9RL%WE%To`1wJmi)xq z=Kde@=3V|PxHywOumAuJHX-+11~`aABpF!vZ%g3L{4wkXr+~?K80tGeFNcnos`ynd%xBa@ljacVj1z4zvK{G8JBvwhe8UDTyK)Feuc8!t;4&-4jgta4nj zePoYTKNukJ@y2-PCi#01!qYP6w;eaFk~p4JDQ^fqo*@qM@Y2;dpuU$GswxXG$w65< zgNw$IzcyUcNQ29_>b_gtBS8|x4;fV#@#q@%<$20Lwt=91>g=aS@x|MP!lDbLmr87A zH2HBw*g2^oAcK0OnD5VesYOD` z5^)bWa_m-HTd{U>*sbuYWE6`v0TC_@8hhOpy=2b)v>y~;n7iL^HHC?T%yl3o=3D~# zfG}e`&z7DY@B%6Txd2*#Hv#DZ2demB3!7rnYy^R8-RBn;y+~d^pmZN3R(AWp*AKxOTwoi3uU4&BT-hu0^51xymu-XtsUR>qy>H@%g2uY!YtvGCr|E_=i<`ypEPVrix z+4jPUxbw(|9DDg!I9!6v1d*{ZjtGnQ7sZ~UovqcPTyx`|>9KtDp-5(uWR)lKCV{J_ zzMQXwhBzqMce zC&$xY{$`>X;f@x!#8Lvg?%$`oHN_lh9vahV2cElg05UNTPygtGSDs;0_@c#ddhGvM zU9coh11x(2v;lmPBgBZH_G7`-^1c}aV2%mn1%fEyBHP`sq~%wXoo+0?314T*GQ0ufmB?*mT}a4>NDFMJG0s7A4MRv5(?KCG5LViM(ftB zMBZE#icatTo9h(#V|OD zxW98N_@d28iRv>lb0P*3BP~JDvrr#$*q{g0TMss(1{y^$-v|pdqgFh-fcSmIpmj8v zjw)MNrL@3*%#Vj>fI2WgllC0AfgX=WkHnWpK>WUN7wpg$qJU=vqjZ?#L!O8uN9A5T z#~eHdIA{$37!aVJD^~+%bJbEY5ENPJ%YVKEEvfe(6y@9^6aXdI4D@t}f`oL)qtEd6 zf?TC9-g2z=r9v*abitjN9UWI04U_AQ9EOVxf1<{QwhS5C9VN5Q4d8 zBcekA<74&uzZnUXa98BYi#%)0sab1-b zhdovTuI-uQxvIAT(REn_qGj=3n?1Yd8;qHlpxPKAstFFg;pRISm_$O^aQE-VjwxCD zpaSrKVL?6hE@HY~5Z3)=eVBp>GJ6(8{@=%y3)!zTIeKqpBqA3IZd|cguO_8jVr$HY z_$?me^DMp;M3iD<4T;g4UOKRLpoPni;Q^HMlUoi~elhT#u>{bT7yX!?4lGe#GBeuO ztoWDAKh8gb0_7#i1syJd--M8_L+pJGA!A=|BY}|A1GhN(!qqflb{CrJ7Bn~ea@GQJ z%y<~UEP_zlKrAKfh7`7B+`Me%(-YNicn}KNgboVZ?1iUa*9d8n`FLD^0+*mx55q#h zqzwR=IHeDjVrzoJ64pu2vWa9^*#l)}&^QSO@x&QdQBfpQZBR~@#RjV0w;=`kB-i?N zL*R~!MEv}{UMN$LKWrV{C31=9e}fr2@XKLd+xd>#H+IQuGYAgA;T7z^MT;|3OkVEYC> z%iwoPb{Vlbh6`w5?k40y%T2d|$tI;<#hnj1AzQ1JS-9j1cCwzE}1Yq z*<$Hwu?xQ_$F#}$YuNRi!2x1s5d+C;ss--Ef6ozS2JI-yO@{f0c^G2uY48NFoyoowsJZ0E94u z6-g6?agZg}TlRenMT7r?dS(d^$HwiS%QtsW{A0%^5C>+dul1K9)GlfHEi@&9PLoPoTR9GL*|RY$ajJTqmH%duw+EV+k!t6 zW{i(Qns0`l6_)EQC;?r77tk#Z023?$YJtoNrU?dw0+Za>RlZvY1@1=TtzRq+Medd0 z1dsrn4DzkosJkoW_&EQMP;x2UuhHRtj}LOG_LcUfw62!$^-yT zG4w1VP$%qve)B3p@T?a3ZnY>G2_u2fJr!Q^??M{5#0U(OLyq)TFIAkt28X z^7TMdc;K0Z4ZkSVV;8?@rXY`L$bmQR0KVoyEsP3a2o>Z)Jw4P^(rcG}`Tk{Ia3z>JRKajD2sakAXTKGN2BBL&D=kXe zgh7}h0{A~euf|Zm;=4%2!P_d8&cN`la= zA`$7Yyseg#14uOjAV3O0$O>T1qLP6GDXg|p1605bO91Guv=xn!U;sue0D|Nje7A=J zI2uF2>te6v@X#ax-V7TX3Yb^EG-aPM89E1vZG#0Z>2@NDH73sMHA?QoKNJ-(uXGaaqh(FQ;|% zj?90f?7ajl>4`Arm0U5yBfd%01d_=_+SP&fe`Jy0-4|xTgoNi3J1UxC%3!-6^tc% zj23PiwIBO04m#ZN68RRvh45_fF zpqSv8z(LtF=wb*U4g!y#ly3Yu+YlR62Ge;X_*`QSZ1zZHg@d{SpvH9?hcZwri9&JS 
z{l*luzA+OTr>Gc2BJh3R2yd5w>Sf=mq*w(KBOS&rL_Br#*MvHQS2i8FTi|o!h$}Yu zR?!|fM;tK0Zu79atRu!J51TLFK}XffWo65+#tJ8hh5d2;7CI28`g${(t;OyMJ5u?E zY002bLq6(Y)RA%LCAo1ac{b`XbE z<;13s`x^w}O6>k5C?^JQAj<Yjx+(SnP^F70cB1>LtZNt#R(K^ zMCaIViHO6D7B|0F8)aT{46XV4Who=<8Y_*Tm*wg5>7|6i-j2_*;!PEb&zIua zNg!p-8V5$f-lF>kDi2`(FW5Zfm_CK`k_U2FJCe)R4mcN(K8yBYUFzgb$&R zxrLq~;_loXKW|WoRgQET1`nvhUU30USwe*02U6*WZ86hrYJR?={s2}`32!)PB!)9m zZoq_$fF_^!T5B!d*P>Ya2h#F0`6wC(_9FqndnG9 zOF;fQ3mPBTv_9+Nmv4qHid*Zx2jL!vs^b{tqcJSRm^%m9TZ}hg_75RxS%`M)`A5T# zDFPB_FhKFj1;~Rj>X0&`s@Je#TK#xi^RqSxBcDwIoKOz6_ROMYQP++_-SxX&Vk4=t zyuLBgiRX7-6cy`MGc!T}JZLl;Q9Sq#knFd=UR}v7EiC92@LV@qT-P%ib4dd$CYPgY z(-JX|R=Iw1Ae7cU;=H{k0NcPLgk<6fiH3Ojqo4xGfZqr~V39sZHt9dl=qJi%qwKnB zO&k1wpr~Dgpm}q?w)OErWGWv2Tj76;Y#Ro0RM;~RPi0%ux8{&`A|??K@9Dqr8V8_% z7AK&61Hj}>fep4VrP1s=1xUWZMBvuO5gsp-_08&&!yWJS`mev%xrLL9oDUNqFN@Nz zNB?}Tx#+ABj08YPAS*)t49X_n(p%_1!v!ubE|1ik?O99=$F+)Nh$kw9+0 zoWPWELxiDg?X7Z)9=isjIoP>YqsqjKRM3#?kAA`TU+ql)`J7ffy?3k?Lge(}3k!H1 z>~w(Xmth$)Un;thHY`36?>~P=T^8XoG}ea3hR0^)J%jK9?0{%QY#GUH8OSL`F!#nq zDB4=6Jo|&@UN6Npp5|r*9}OnP&Jc&HS>vNi+b|f?X|A@mICN04`cVNc?uI=WlvuQa=|E^7kS(Bjj06LWW6}(OA8m%& zifgN$CtO+@_HndpB?(94i{ad=b?ZGlj5~k4+d<>MD@(O4X_%ba|??SA{91S#a@Iaui{VG9l#Tq zm@2-n@HYok@90^yE&WU@uyXZzodZ-nZT5Q?S+W1)qHpiPpzLH_6IY#>yuK zgMHv|P#34EjXqwQ>X?@*S3`h#b$ zF77}|RLV3aF#H{q1COFgA4?3o<`|*Td)Vbd`UmLc)BLfk79GWyH?_DrE2W|7lqqBU zA{BE&oBA)t2yZ}2+u&r?oC=Yc?Lh{%jb(ac%sXSsxPweM)DbX?8PeG;vDJK*lSo%d;{{x-ajXm7lDd^f(4dgH`v()p7^ z3Bo4ouvVh~1&jROngswsFci}cFy}gB_JM$HmTq)+`sMY(#jOHp0eL}4iT1(m9F;ku zKmVF*zJT6b@EHgp_E~?u_5y&3j}RaU2Al?g`AHyru)s$FDTyZ^Vc)&Y^EoRq^b_zp z(t!5Mh%B5EIUmOk75Dd>be}Sv))nv`+U(!}@l`AA;7JnLKY;>J#|Kx)^6EZTW>}cO zPD}M|SWCPWTUW?Pgtg1rjk#_SJa-nt`-gXQvahg+k7LrEM9%={9OUDr;}`^*?Oxh3 zU8>J}3Y#99I&f%Uem!;UT8@m5)7r!UBoP1!2GFl|@965OkMs-&2U{TkQJRh#L>@x2 z?|nG40$4Z!q)Dha8cj9VS8SR~6?cgNH*_9OrB|mCB9uxGQ-T&iVFBBy-*B{(E#_A! z05+cY+Y>xOLu$iFOJsu%=%!r*-SCaCr_1xu(S#@^3w#r_UOGfAyDw2(dD;X)dpYu# zbN#vw2ZF$WG!Pw9!SoUNd3y6m{^maM0X=Rh+3*kUq>kqNIN}llivAl0n=qgTnCYnl zhJn^}*9TvVHvXPlpbvBd-c41{Gj{cL0T-4m7v!|JDEZgEOG~(4O}EsbE8Z!Grh^e%4q3 z5DA&F+FuAe+3Eqr%$vd(JUogj4w0nE0d~lZtmh%}F8%K{C=7s4u_&;C#TcOB7yz5q zbNo;QpGz2E03#_>@ZlwBL_g~n!8q|DNL^SRWc!=JOn6rQ^`u{ju%~>zfSj5xCIt8@ z3ZrfB*QDa*>;y9xJV$=*`;5jPd}DYaR`1>2-R&ZwK~6JcceK*)niNk&v?wG%i%cbv-bZIB*-j;`1-~7 z1>60(d|-{dtWeYWlx~yxf)4KT_@jenEfAsb$W4aj2mF7YI%D@66ua**(W?6G{=rE` zOW$arVumym3Cl)4y}!m5oP&-OiSl}CyNd69`NWRH3zvW}r>s@nqJ_;!_%0GSRs)S+ zL#9!}22^GbbZwcOI-GlZ-+-t#wX&aaqUd!3y>R>O{UP0vGrQj^S*vHA=m>zaRi)Dg81TGaBl}U2R8>5%LUDsXfp@K zoCg3i0*vAH2LJ?jC>j6=phr6X$rPj^Xa|A#Vh31aZb%MRR2A+g6c@v=z7X)l@KIMg zEBzGF5C5Y|p?lQ4P(;Kmg}@YElIXbFI8##{fs=mMFO0+`Z(DVxxa=6(8%5cyDU z#$V)BMQ+5`Zaf$X9agr=(}aIh6vv)#sNfp&;W6AsI)sop&ES;d_8FrN6-kRalH$VD|3 zyz<9kRR%s%_1ssd+X?eA;iHvVqWnKaVv3l*9*^|A+E?X4CA-I$H99{}0pPI_9=C+f zWr^d>kM-YSTRH5vlJs(j42uhc1AHMft&IdbrTp#qm$h2$m~fG6sPkipo%}-!d*FTa zT-f;eb`%qdkpZpOP?Ile^LbgyQ9O)V!4}bDi(>8$ivih`o(EdO|1M$*B=6@%KSsEL z{_j;ivsQv(5dktN8d*zueXw65*&)&JPvBAVdPF;4xRO8bq|4ggn7+t2!A0UF`mDb# z;}A!oaCRbc6fNwC8Zmsi5CfU%2$N>Nz&e{! zoj`icFdbdYW*~x!2B_+nV0nU5yN{`@``p0xv{#+zjQ_6|B!awGzeX4qYyo4TM)X9s?AdPE+ z#2ctM9)MQ42L@9`S)-J744kWbQo(y*;F41&1MCL^O}A#u-XdNM5=sYoRH$}pQF%wx zz3X_!QJEtRKuSFdf%d+dSKpXMF-fgoptY?jbGBnt(F&I8!|A~GOZ;^HCT}(mY+kkj zJVH(}zj`KLZS|LRnOOQ5N%8&TP3<&qwfhiO56(gCRwrP*0~!$s2_y?M5M{1b>C5PV z7qFOnz}eHk<509A8mY4SyG}{s{8pOyWeb@>)hS)C=6c)YiFsCf+mikV8gu%a3Mm;Hc+I2aI+o{n-%9}>g&OXJzNwS^t8{XYttSY7W4Fb(P50@4Uco$CCAEGNW+q$VNsH3Y=G0D% zz$NX4K}Z=~y(w?=L7-3s08TmX`^9hhsq$CKAV%~m6-b0G>Z-N~m_dv>&-;D>+x%`! z@3IOmU*bmDvAk9-pE?~&oRVHU?;|j-^4=GoVEEwJi1Dc<;;m1*<<@|jR%mX<)Lyk! 
zM~7>itF-bDn*XQE_!A~=ge&qhVnEx=(_%o2Dll*n4OP?V{@5Z}OSJ(X;o9v2;9NLB zN!PE>5lsW&KZyFQq9P<`Byv!FY#r%{-pws(umz|9o53yLx`RUbIj~fLDM4BXp9YD( z1fT&eL?R-I05QQu(7nfn|4@B|2r_Or)M*i$4ug5Vo}Y*Df5eDj`?M>ONd5Oi!X++; z!t|BILTj*k^7f{PxKvQ1!8e+{>0sBS$!z(2Dhxyr0ImQX;uLGm#3T@A^S;)9--wE@ z8qt01OxvE-VH*#@v$-6tWY~mwqS(|-9Oys1PNxh7UqG4^!i_FV>(S@sr?f1U)q7$< z3h)8R3zl74q|-yO004+5?XBgv4+gv-4~}@y`vvQ=gEv@Z2FeS%B;aU~t)w^!)jPv< zclauozcfhr8WbGb)wVI8)Igk=weeio|A@xIq~yKwt6&VA0V@C%1U3ip2%rLT01!?G zVIlXOq88sG%5W>CVx$(~Yz3fRA;TJYo9p&&1B!)%JHi$68#@+Pjfs4&7)&HcM z*~&SLAi<5)9n8K=SMcR)W_>*0`2Gk7;C9(idzZab#^_!`%EYWnzNa3o55fI(X#8ss z&cEM$K&G0}`vF!^11^Ac#vXjiOXwaGx_wv`cT1PYh-h#vLk5+DWSeUg9P9oX37h=X zPyOxH6iC^}AT{gIsH|+o>)D=_c;Zg|Z&G27mzS}yejNounj{yk+^D)NKRq(|D+Zx( zR(1j?1i*O?u3WQ#{|z4nGl>7|1_(tAv?65ND+`8>5ga@<+;OSTPxJOT zK)Cf|$SwEiq#p&0D^-24FTMfB%op>&I7o2DYSHG$Ylo_tOUI?gYQ6Q>Q7>z@yvSOs z({N%80apq3)o3JU3aPdK%6NiJyW1N+xPyX}=>E@7M~tX32PJ~=a!}yq;v4JuI<{=N z;2?)`4>tkUF%*YlSUnXiSgeEA85sGgB=hMv?U3J)ASSVRxYwbo|9lVbPlFNj6Yz(Z zjjyhzdp;wdhZ=d3m}Z?Q()UuXnV$Z^z%=coq#Mygs_f9Q76>06_TsGEe{0Qq9mV4=~2CTyL0U4}tm*Y4p1E zXiJoAD5w%_V?EOK{t`HwURd>&>u9}o^jmB|d&Gys0M&PqdK9!~>Kx4h1pNB>Qvl8na2%eh*7bo<3W^86c1+1%Mx51pF7x6$88Uy{28^ zL;3vWdI1aI;K`_1oR z1v3^qb=Lp@3-clRXa=~>$D2PCU-NnO9O@9y3qPv)y|Fyp%tYQ2Hmy_LLkGV_LpC%?+6o) z;(id+VLB1@RU9Wf@yFNEd@Tr6fsyN>-(N3`=N2eHt!rN#Ag=0$V)OxUm`Uam3IQOj zH20woDShn)I?LMYTfUQ#gpwJh-?_r$#JrwL=nS0Q{uAr$w#D*arJ{@F6zO?Q3JQ`B zzllL*$165Je8k;Qhnj|j_oZsv_pI8FqPwrLc-u)BCtU5sT*=1qUR8>@m#~9n@1)=V zcrZdT=KQLm7}JUt$}lPd7!wQ%@XVv1aMa{mHkbVO+So{O(3X@otrp8}E6eP%iyS?K z1Q*uhx=JB7eoVwM+m`h&#XXIMNt(mB54nY~jy%=GN84GmU8#ow<_Ti+N@-K@a1jbR zUh6bVZo`0RTJ5dhCZ8+O^MH&H$3oz+Xe%?ItbDzok8arZGW6Izf(lz*9Rv7Fv9Lp8 zW~VE7XOm%viNlCqx^a3QkR;!KklhujAv51F*PARr6a{bw?)Jsv@bCN~R&=k7%`@e5 zUKWd>`xneh#jgme*GPnDUIzddV%~!9o8-?SDiM$&ierak*Mk~~fN7(QzMvmYW+(dX zmN0q?{@*am#OkgN2^iRHu6YRNk)W{HF|n5aW@!AzEP;1&pvg$(m`*7;;q(EsN)>Z# zht{3MtnDYwfL-@#0s>D0@2GbhAWe182C$Yea;~XzUo|*clKfeTkF3$fri42nVqB1P z9p$Hc)8ttLqaMBM+)=gy(Zt&9-i~kF$b^XcToOIlIMvil`6JsM!VxJEkP|d$A0b{( zQ6RgPW|>$8bOR~?`2cT(A2YBVP&-O{eVf9BrT?a$nskiMoBtU&yBY1}r9xKULv2ud z+huEmAbuAh>===T#v2_f9T?f49K4uF>(m@Kkgt=7%teID^f^3W!8f)nL#y zlc`V50a;pd5}vi@-xIliLIE*B2o52OqOpQ;f)4i1Y-U2=tz~O#3j>sL$Z%J2@3YOn zs>?G=K$g7Yixu5G%vG=u8#W#F`qk6Y)hcX#Ki#xCah|L zwZe`UT0X-6{Go1~*g%;549%D>2W#I1YqvYa(}Q!3ce3kGvCiXJZrE6GNFN-g7BI#X zXbdu|VZnNLsVorFMrjDkL{Wx+AMexe zi(@G$u1xzI``;GCST+w}*gpm9VDr`XdWw~w>B{*HfIWDQ2xUcVGhhnYrazY+%O5j04KZfG3^&6W`iLhTnz>-R9v$c3l>h^Qhior znGM84=!DrQxQsVf`JNxsWj{cOHVMN42K?|2>AIB%*fWw!DZj`3D@W(KF&p84U>geN zB7yo@l9AufL^x0^-5(X(LQfaKk@Yq4HlJ^o!0Z5=!{Y&(lq_0@RXyV>LP@{;aPNve z@6+hsNnj$s7ty29pmo1m>{m|75(Yx+%Xl)7r-KrUj2GG#$T%=##W74$V9gYmtlq&m z3s%Pv6_QJywHK|>4?_eCrzhAx{THxoADhZ&6A0;e%i{sikTH?IHQ%ZvR&NR~%~2a~ z&XxqPOEz%4kfl=i1r!|%KojtA0WJavEVZYH1#ezVFoE;$)i6ESywNj=&o-fvb|Va0 z6f20s+xCpi8yi%**LusW*ZOmRkN8u4_qZbh{38_qzVsn?ZTcK-^u76Bn@hAUci%3M zF9yM43PC|)4ijPyB1wdxKLYJNrLr3W+|ua;oIE6#Q;ah9gIUD0dH$wzU9Dz6AtJM} zx*h>{76EA-BpFw@J2O)Q;>t}YUo56Ih*dSvJ`}T!H~;w5x>KBgOVE9L77+!ZQ1-Q2 z*H!!-{>DP#u#WfGV@7-+?3Qd46lhcwdCEGV>q^w zyNV1321*V9b_52FI6_B7Bf=U%Q!AW)Ek(&(fN(Cd`%UTcu8wc^f+uJ@8NZxyX3t)u>t7L?!GOAxmQHxn9{EJft%uT$|G1Z>NM{Ti zh~pMix@$6XSvk#6$e5z~q~mpR(?J6oSczz(0l0bS8%e`ul3?+4R=!9|qFQlm)4Zv| zj*Hh$NNj$}-ldm9SVlCJ3OQrg8cCIbF3rZa{kCPKt>A=6P7^pF4FyD=H;si;2@17N zehQcMjj&?Pp>xnP--`BLl>ANhQl^+t6$Iam3j$zBQsMHW20OMC(2g8$o;44Vn;+xS zGRL6KYEE}Qm2ADAnpQEIa&StmzTs(#H+c=g6J`#)QMM1De!_z6dh1<2$I5P%gK3IK zq?Fo+g~Mh{_ydY=Qh)im?gO_?QMC57Id+iv5{EHOgX0ij?3q4jzhKx|F;v(kO!f0I z%Xr``l%S{!0xVd?!Z&r}0lv_yDxEV$2U3CM6mfep(_vsza8@KZ!$lqnK+Y8uFkc4+ 
zGdn=mlNm2H>iF5#(Vo}Em%i`fV`w#KJNCH|eLnt-x*WGbP**IY0@4i)ge^LIEvAv9 z#L-v3lEJWkn6Vl@*54jR z0iw)(BulZ3(p)M_4ucZO!T;-*up5qXj8H+jyj6G-x|PTVHj|dCX}Ea;1Rf?Qp-Wy^ zILJK+-QOxgJ|LJrAcNv48V3QOYM~)aUj5i1VK{y7@yzA$0=I)8P%ISzG!`JRk-XzC za>O~Ejwan|-_Se$B*V+m2xbr-7@0GM!?7c5MN8#NOv1(-gUS&hMhs$kx@t}trYU;d zSQKbZM6=^Gn7o!2Q0OLO1$seoU%C?VRQ7$pe4gJT@9WHh>zKVg>g*qu?IB5C%Ml@D z4v|Pfp8ncR3t?5Hh;+T;N*Yf=HF##L99fZ?o9}=dP7T~))8nELhr)u8S#9J0Gby;a z$7P$6@ArT>w|4rS;5J)k9!Kbq(o4brK06^xy!1l?0Pqn30N+aY30R>6kzUrF z)#GAEajdzW(h7lJFMb(8lnB=vsv&%X>~s6q;?~`{eqcn8N__{;wDJ?6Dh-3*jus@LJO4j|?wQ|HI{kI5x=Dprf>EIXS|W0OR(PVj z(0*L2=vEWrY+~U?GIGC*Ag)$#W9Kl8SByt}2Py`;{WCKG-v-+tO|IZZZ&s&+-ZGpEJj`){Z}1(ePXXqEc(wc z+leW5UJ1sF^tZ~jo(;;=Ov3Gw`x>qe&&G57VR1hgTFd{csO~4?i^WaOf}^HfRxAii@Q>ua0Y@}RQgi-i!M`UwS~kX%QbAGp;8Y${=$7dEmHE{83~ zF`WZKi$cVMc0SWyG^WcN>4b*JJ1nnm_TRr_g>*i_w4V^ido7zkzA_*qplBaJ{DZc9 zGa+&E_)v0g#e@Pc#rcO1z0|=^>O~jb(5{RiS;&aX2VmvuNdpnkN>O5l^5iNN*Z;;7 zwbY~O{SzxbPWH`d>^XmeD==KWfKObEeb9l1L% zVAwo?vttQqt+QtsbD+6~8zeXWIjZ|K0I2m!lZ3^H5*{cafwUAP7C}N9$rZUC{kzGH zmXL=#u+c}KxUH&^U*byCw2_KPwh%P3mO%=jD?0~A3M%|J>~^5$bV9fZx!U`u=Gq_- z1O+(U-u@3&@X+P^a6g(2O}H?Nd>Mod#I_8?p+bNA35BqjQoD5OqeS6ILd1kVm{62a z(+kb?ZT+6We4?>_QcwrdV{FEzA=53;AqByZR22lm5d^5iX5!;UO;kHkf|tjbYvwF^ z|1bf@R1^R>G*Aw?m#1@^oZidHmjbPQzuU~RePl;;I}j91@7x$Q(F28u7>|6+Ma+4G z=&Zsbgu?BNjQBo~1$8fK2*TqF?`gklzP1(@$|3fCFW5aKmp7)Q?-p^24zEzfGyJUA z=tC6-Di0L~S`XEZVEzD40~mZa=|L(=^gA$+v4JdmC@GI=N_Jgmk6+o8oZc`2U#K&9 z^9Y%^xoB9$e*cp|Vg+Gvs8JW5<1=@rd~F}Wk ziqeaUFaBD$n7kzk3kV?Gy#Hb2Gpva5LMf8eGzWJO5L^Xz*EI772zFPy_Vib!Z!W>*POm7dBKACCU^9Jv|Nm(qS>Mmk8iI zExsgize~It#lhH+s5%BJ4CGKY5~zN~*ftHc*ftNvbAONa2q1Tw>?D-lfj{&g+k*tL zFg#9%ZlM)YL=P!p@^j6p?66U4++RTx>*EYLf>M%>-{F6b=HjV9bX^DDC|f%Ld>~FU zh+WrQNXr$Dh4~9(|6&kwDu9EPQ@=eK%iNXIgi3YVI`gyl(-vNV=oqkbf8+KDd1x&1 zF??7u649f?#rcRk2Uy;w-^YgmSE%ex9kY=BoX??KeAq!Wo7fsNV^P{wCzi!;j6Nr= zXLp%WdVJ1vaem@3_@?@V5G@6pzLx*E;K(H3Z;Djcw|e;rL+x4?_8p7${gyF|W*-;} zKEh5PM-50f`^)BKois2)mW*GiN3nXX<@6RnehB8nkNpS5fvrnWeiTuEiA{g`;aI3L z(0c}QTK^x9{Fx+)AKV*O5<7#^^vmC5;LHYmZ_gA%VpHY6i-0 z_PYo&q2?I$K9C)z`t|nZ^C7Iy98m8v9b|h34&-#bZ`%;Wq>n-pVIX&)J6QOW31MU+ z%pZ_gN&^Sv7Jk7k1_#;tUs>z#jJ>L#c6KJ$5KfH z<1HUR@sNu-Dj%SBXa&mz?^gG|TEezP#qY? 
zQ_h7GzX<+o#t7Ip4`BPtFj;fs#O&;P2hcwlO#?vs2hrvv!Of5V&{xvXJiB2I;%*3- zEECo61=D(0d=KDoiS0%B9+s_(##w1F;~dI{F%eA6V4;C|moP?JeJo?6xt@84CnQ@-7GN7!6kRkheYA z`-dGWb4B*rJ%i%r_3<_<6^r?aU&4DAu^;GxJ3q^^x{M1h6AO>Vgnih^Ap7~^3U$~% zOC|jX>>klg{vtvoBan#**P(`>dI$LkItRQp9DRde<|Gc}hjLuRrce+lGzjCmf%dSI z=;v}oe|(1N_&s}X{&qGnnbuXJpg%U)eSE?klC?VoZ3>a%$2mhO{d|<|%{?X`HVAQm zJW-{ttK#~6NSEMZ%fQ;=EA+QhA8DPID~#JGaru~u!?AB64~Y>n!<-k6lkn;0NS_|w7H)8rPQ zdnM*C&_I60Nc(t8zW`gqk|*AyY4jZ~Ou@|HaQFCG$I@Iq1C=mci}TouVSAr(&*RH- zdi#LjJCQx`9@^@rk2V|~xdMs^w*b7r9+=TCtw7)jf4o+hgf3?PuH-Wk@gu!4<-_V{ z*rBGq*($~*s~v+8=I+5a9q7+m<>XCw?nOY*KIBXr0eWHPI?_85eb94}oMi)76W8qf z0y$BFVMFCkRA_X#(+vu;weH%~=~hPyKx(^R+Suqp1|AdOzWIo}HN~Av>Dt&%HV8P@ zUX33{jlXosOj`_H{*RbFH0$?HIP;JqNU=z#N3nR^h1$kT7KaTq78b0Vx*{74DLmcr z@i|7!;G>OSrCWB|*8>v^g6ZUT_^}%R4YP!ok2P zxOvLKo&nbbIM$pc7`gfO$7NIQOTXlo)8G86$IJ?Xkf0)bO>4-@xBWFNAdT-q>5BS` z%dd@y7nEkh&`Jnq%lZ_9l8pfM@$Uv&2+8q?<`D;1>i{Y5F1`iODgY8_UHwz#y+mh8fyd6cqC!)GI=mij$XR`lTS4PKTBa5-oJOz#FQ6w&*AkmUbM%?yVhcB zCYW{-2Yh*nnqC-AE!Sue;^@HEE#+$xgYe%p9z=7g^e!}p>{(O8u#7rEA_IjE6hu7I zP|@fmxJO+%A>wSM4_?JG=t`{KU?={A+VgzOOzZI-;{RC2Y{5^OPvDZh?%!Ry2V<(} zSA57OjsO4(Tp|5*1~|SFA2;%69qUksgl}C!AN;*3p-=v*DiC+CTqwU2|MgrT_)6{a z@q90;adq)_dw9BjUb6p|h46Yqcy8R(Hbx3Gm*j849LxxX$H2v~<0@-KFRK1ZggB3m z*ZrSgE@5BVXnqxpU;ck!wq|E|WY) zf;9UuK?6YiC7~CH+QEda1NeSzA4Ean!iKVKjql5N-!w5}^t;{PL(Xx6uG0sYl$v^D z_G@s5ggh5{7UD+~VeKymZV+RQ;4<1?E;8F`q!KIL(EeNYi1bldEW3-$Tsu{}nZ>RS zo~Rp`l8>B{ID26T@pHht@Ahwey<|fDwaJ#?4s-&z3OH{571wsr7>PtDup%Qx@WY!{ zjBDHG|Htr6e}LjC<+zMjHbmyi%x=+I9S!=BI23yQpbg#q#H!=a8tEP;0`1(@aeYH z{4_qCvgCy~|I_~c#P%EW!tf={=p4O)o&<1M3*j(YD1jL5^PLEEqxRU>u%`_JK-1dz z-PS7j8d~jTF*TII9oSKVL_h{+(ixwpn=2Dj{C$Wmu(~_9S)5Tm>%eoPkEMP={ zp@kgs(&o=_w+dg{ArJp}K7yK@qY3uB>a=NVF>ufLv`z|Xlu0$#rm}-EnN$=()sdN>6j2U$3`W`3pA4I ztb{Pf7-X$AgImwR zb@sVi&0SfXAS*i>)4wA)AGm}O`6i=3o+Ge*`UX;%+n_eA8<2Y$-+Q$`YH1a9f;_u^ zE_5X@$RO(Yo${agvp8?C7hEvYJvEIAZ#o=g&Q6z zoDR*ncOpJ+$Zp@}-7`n44Frb^wRWE_VFTvEgQq1kzCZm3z2$rpydNMYr5yq+>DS8E ziGrYu+8jlL>nP@eSBND^_cX3OKh%3k-S4cq7bNrq{>itKxE} zDFWpH-*SKYsM4H*&t6)6Q-YIM8{c4HKEduKzifoM9P|xyYF`yCyD@z22ksUxguvMx zJFn;qFK6WzCAh6t1o0)`>!IBmm0OA>Wi;1s$D6a+_I{bjzvx*hAZb$Z$vVrmi;ZQD zrY!pga^>I#73-6C>%t~L03ZR_RQ4urWp5H=3L%h!8bQGVs5xLl+suDs2D>85yF9&2 zBqLbW+6FxGrCw$aYhM!AlJ*y1m8J*;3}Fufi&{{JJ^?y#mpfUU8~hpEuHdjF3=TFG zKn%PAKC=mgLw^mATCE30VEC7H%x;=WnD&Q5?e_BT7d#XU2@r#+#5CV8ZWFR>BOz=b zkBG=e8D#_aW?4@u^89PC!zMpe$R98d! 
z3Xr_Dy3V2yRo?_Ti<^E?U-jt%DO(WLU_g0}IWh4cpv6I)=2{Hoym`9x4f^H~{;*cX zG`;y-a4>+D5*GxNF(L-oSSTb*f_Fv8`JzS*N!P9f)>qgA-i3A1eyx&|j*sgSd2ge> zh8EuaZ+9||SNwYbbRZhALQsUszo|@ge}HL&;yE?H)Ox3H`|F4Ut_uq;!*x?oBk1e{ z;u{Jz#VW~%;y!E@mMlbl+x2y@i`G$<5FmMa*13| z&reDOq)2l9`Fax<5Zas&7ZYM$T)B{<5YJ5kMfWn$2SAUXrsvsojwP`rF~~-7uU?-G zF3$$NfiUz5vJ2u4Z6jF7y?G7vS;U*8_)EVj2V$b(6&yK~nqVs2|FPq3`0s>`jtxI* zT_V-=K-4gNYA~s#5HKV0;ua*0Uu*b>c*gy5*CfE);)aDru^lO~m9HPfjK=eZhJN0B zz-R|LVSr8(-*DSeg;PH70oFS$yo2V#wGzLC_w8#)_3ps#_Hkk0v}&kX=<>9^6Eu2S$csIqRViq}f^J z*9VY{2w)1-7(-)5&wD*6exDbKSPK4R01;3_iJb~m-2YfQv2Y%>#rzwB31(HMbyXSC zItY7_xQTCDAD+QsIIk~#yq_vdwc_)Hdy_ucNo%uB-i9nQwH^{)+TKcM{R z-j5w|5qvLyRl=sB%Z3U^k}}-}7+}N^sbZ(9FJjor6e7GOdT=8cZ*zMQtp!<-_#o8` zAm>mtfV{I87Z+8+I}`9Of?{jSXe74!K8*zNVw;+Af`mNXEI z>D}GQ;o;(d0D!;o{|>Qm_rK=+0DsFK7<}6Q;3@+cMe#n-% zu0Q|$d%nB>|K$q!chD1guKBH{uMWS#RT#VzO_1L!%58E?xt(XgKm0w1%C5WofAzip z_v^0u>;7N-KM6b14zK#%d_Vt!cy#yhiAKMN|A(O!E%(=7|Njs9zyIzl7vF)z!Bp%v zOE$mbR2!UUY6GuBRq1}a4Xe4a|Kgtn8-@#@DnOllB1X6q8~aw!V3UMVZJWKU38 zTddQoEN#$20FXFfG@K>^p9`%2!|HF%A=r+D(s`&9JchEWacg6yU(1>MC-~az$h61l9=KB6W`eN`91Nmovg>>2Lj^;D}{eS=Af*2xetQ=zT_1E}+oFPwu zKADClE@%IjXkxidPXtY4L96=z!yn=A0qvK6|KNkbaWXA0|NJqHVk(l9-6&(%rEMAJ z-x|(qZjO>q-oTJp%${a;NLIIX5sj@r-mOUJ4K^AOl`1JVH@Z-SQcVmTsKR2?CQi5z zI$+y#hvD`YYA`KsHI})ZDf^(Jeq|+f*Z=(E1Tf#FH}drS+<1mGFaq_$S{VHX487{31A? z;79rS1{cT>eRlm~N)7q00wgr({4#&|I0MjE0@kU~m5N}=tnZi*VHB1!mx7dFrMN1> zS@?gzE}$|6VP~LETxoC~=Y^hN<3W?an{t!y{X-ddF!BFi9sz(o1!-EC6Uc_B20!M} zbw@x{NU4oTd=I-|^FMf*FF0}j0J!usfw8o~aF{dmyq`ojI6H80v$KmYypE^O{pkH%(p4S)PUPaOA-GI=OzZKv1& z{+aS#?iRJmM6}>D=VwQ-SrCEFNWR-(lG8eL-HP*!I>mAcAaHR4{d=btM0zSWDT$~# zJ@Fow{UL>x?l|}Q*xQ<+sZk1#QLqqF6;9dvwTri(-4uDhabN#7|J*eLm`}1_0r=x6 zi>EKeTm$Gho0&{SeUPT6xS%H{>Epk$Lm(N#jLB0XJ7`QY!%`|Uj35>FSncj#$oJ8us*@*bA%)1B!lW}eITeeOhM}4*!MPegU8X&82|t9lxtGR=ha=y zVel{;QcUr0RXJ01&K5VH!ry8J+?%5erPt$nK8U+iXmj=<>GeP?d4`Vj&G$9k!h(JW zG|9~%x``mH5I>gnG}blFT#;D)jtrWwS%Rs$)f|^p&2VbtHA(-J)u#i9`(GW8&YlW_ zo6e>!!KX@nRH^k*RK?xX-*6B@_|^+J%-Qq5iyHGs#YpKsdxU;|Va=rDyeSXEUVxl@ zq`PQo~*b&qs_%w4m3BF_`c*lhiy`_ z#{*1h?nER7E>AdXI6l;n2by9=H?J?{Oo%{{Fr+1MzTrh(wu4mT-@Ioq;71gxt4Z}2 z6TU?*c2O`3yyl6($RHNs_(l2&RB>PhNejyD-`#yM3WCc>GdLUzbHO0+5*BeG1p~lH z^zkQv!2nD{CvyG{-^1gs0aaaB0oiA)&mYqOOtJzN`~3DG>OFt|+yDP$>)he*x&E&S1zu?MBq3m|8r8kD*p~9M7$UtvE}s|M&&~ zg1(;t@1>{mrU8HuIe}iOEZ<+hayJ$yLsG7l<=}9;6{9Txu(LC}HD1G*C_dS}_Mk}X zP_)N`mI9r-e1VHKz>dlh0LT&=ME7>a(^*lK+h6Fu4?tTuFjkH&tQ)5Sk>hf^7{Q)yQHF7~-ZDI@Z` z=2@a6hq>iOd%uk#A+6a|n=H?kmpYH*jztZ9O2n427Ai$O09OEyX#vQ)Hug#gSzOQz z%Cwo||NUYsnj;{2uUh~31^~uoW&%C69^|CoJMe_H7q|SM*l*E3igq$%5BBgKI@aA+ zKey{({w8e^c!fiMv8I(*I|1H7FdR-Wa1ExqTlf$qn;Jz(9Kh;eP*zeIwN~xt;a-X} z1p~&#nnlFNJiqHT)@z2WNrbe}!BedNW^L@Dj_kE?$O?+YNAIolMIN8c9_xFA>$m&* zfTg{b^OFDn;UJlaFc1lr+MlOAU>FbmhA`!i*t2;K^g7QOsK1V8KlpxvVUTdelHY&- z_!A&Qjpq;mCnycQdjw?Tz)}$di5yrQVtfvZ2L(U#14(EceU<@8{X?Bez`c@_~5m>gNW;lIOd?ZWnIAXJwATO;>C{qKAjL_tK; zbd{or7=b_rUj&QDwBNRX;VXorN~wY%Ec>YbgcJ(L*&&1>_}58Cu~8((T!-w;H6t{^ z)8?3r%lRIa_o{ojS|1WRq(bau3RkUv|Mnh-*=zr-KjF+K`Uz1~%qNDE-VOysRG*da z^-mA`6SR}7?b8e;Bzc#iAi4sjo~AaU8_V8c9Tn5v%oaQ1(s<%Mk0Fj}ue~l$6GmdK0e0;ebv1sH!=$^|ElWuEnK5?b z$HSl3_Vw@o@cRm2cx~qpd2l*G`Xu4{NJ0HRfgoju(_=ho6O?6cMRxP*B0niS-lZ0+nNh_^y%ri_?SdBXEj!IfnCTVcbgJ5xjUD zrWG2242}t@>L565{BRIOFZSCBxDc*c2V&{|4X*3|&Hcmc=HBJcUY~yk!mOd~VaKi* zt4MEXMH;cu4v8D>+={a7$PoUY6C)SSvhlX-Gcb#oKF0Ipq8D9m!Y@oFU(-(dd}aZ& z-;cZVWCs8RTmX{Ku)+{Vfmu%tD2s>Rb7Ah@Kp zTS{=$is(2!7$g_eqm8@>9?ZAxZ`~0K#YG?Kz^u%0EBjsws@X83kC64p|1Ji-`!?}g z*B)Ct%sV3jas}H}%gS?q{_6nWPuMmMpagoD0~!B;x5^uZS5y2^x z%m3~9|HsIs=lgN*`CgiHvgU(+8HgG5a7E_?oSraV3g+98k1txaZPCb*Xm#CJ1Ao)= 
z7jAM8Io;TiV6}$@inDju2^Fp)`v0y3$bpOA)GithE2-K4NPIYdyG+uDfBw&w7#@3qyLn162Dg`Qq|@WJP(J8#!f$S0+N|HPfyJsR67R?{sT>g!dRf{)RD_!h zW0sNj_(87C=qGQmqEAP+t>N86Z_}n>mrE9DYw(t(rzA7!XHs!~T*lYmt zM#!A8SJ@(O{eQOZ)Du?S8^hkmbAb$!m9R~3^AetFh_t2C!T@B0mV7y`|#$Fw`2^+j+Uh~ za0iR-(D(zI;~)TIG8buV2F7lh<7%`fXf=X3v$(9JJhz1|G|F~dO7q5ZTGy9so(r^ zjaic&3O<+c!1m<3;NYPV_c>@CYV;jwi^v|Hgj;GR*tBKB7I{=aJ{izdYrqZ?eKxb- zz-644DgcOuz;+BSN6dTJ{XXxDhwualnJq86QAeN%(ITym2%cs@4)Y+3@Ax{&Z$$Bn$PlEh_lc-Q=m@{y3Fcc8980tHyuN1kb;HepbzN@ zTrtBb=Qf8=srWrJdU|-$14#tX>SFd6Gf$RgYDN3#01$w5+HA2dm1jZui6+GdS`bM2 zt^>L!pJ?@EWkL3^3<3VUjf1%+csDX-gB9JxhuD3%%UKdBdC(m+yQlwu$Ah)Q>CJ3A>P<;gi-|rrXoLqO)`7~Qlpg1W^ZzB8 zZ{WWcWAsFNetPed^)G6t`KJ5{^j_YRO8&Hl1;H));-bbnd#)JYYu#!Gfs&IeU)@ml z!iwxGVb%j9&~oWEx>JA@0kz3!1n1__dswE}9Xozt;FHw*7cm3Lahy8_xn#kssk~xG z;f8nVkLHY!ePHpyQ!r;#jaN-9dxkoXlKeFdzIJ8XgC4D-_qaHe*(}9#xsDZ3nRIQ| zftU^EoXqUIGuy^2AOz%Rr<+-^ko>dgc5p{r&5#?%Gtrp;T2REOUWO2JGz2l|Mp5-e5Nm|awjuD&}M3Tp5K=Y zOCX5^ng7g+>$8y=0xsdXrrI0IcW1c)u-p_rP;dUv==5Pgi~%TB>~^#80HtO4dCWgk zY!_Vo%)N?np~xPD6^j#)Ulz;%(SsOapZ;gRz#Ik<&TTV)_yBe@0pHU;Km_gTpsS__ z9tHApb5VvR%ir`$bfRoWLqsvwlW-2jXY(x!$@F8yvx^RTq@FNL%IY07_@YrrVWh4T z210pmlJU0i1-ix_+Tlc zK`WXAU*UI_1}T;M`~t6C9+WX<0P8|a`ak#vs+k$1p4Vr~m&k>2xIM8 zDC<^8q7QMTbt3-*2cu8T`~)MVjNhUPV1_|-{&I)yuP!a!POxo zG*unb#Oa9@wfO@bbc#j(uQ4P*B^DYWzJ{TW@sGt!c|F%|gJWq_?3NzvQpvZ>06*l3 zA)5(llyj`&TK!zMj}yc4y)`G6K{A$j4$W{AmbcZSp!J&SvP3b~c|3Kw)4JbgWv0gx zJngpM$L0WxN3fiJARm*uDtoHVxH?+*(E@`Yx<|J`XT)b)_0ARN5HPjy?|=$W1>AQJ zw4msS3u{3M`%R!aPcp%JNBj4N9L|(i$CWQKv6X9%!T`ljroQ0z^+l0>rZ@cwAm)ek zK#Yf41v$u`kb38zx%wfint|;m4hIYNw<)e503a-A2L3#08u;~N{63Zk&#AD2G6+z% zeWG1pXI}UI3?oo`!rq(TPp4(Cvfmf=|I-i{TDCi7Zg)v}M$jW$4>8b&HnnSd}>*dU2%uC$=E z?d4zZZ^9BB-d-zcnP*^YnDBJrz!_0y=9|=|8IsW$T-G!GYufPhkQjS-uk+Uu@K`%4a;2qPobnr`t-{2MUsox6#S`bcGuy z9B3Q)e#@$=3%1L#!NH4rn@1YM>9)0B*lRmcQAcVbA`vb@VrB;$$~hmgVVKTj0F8>0SZnnaql?3voUv`Se#~>RjVJ3*FPmfk5WJ zXwTp9uZ!8{{eB;{zTy}_U%XW>DsMS7Xg0l8Db8jZ5U4K()G1E!$ofea(?sCTYqv@jk1 zKhkf~3N;f->af=LK_Xi7y8tqZ%z@$`o(%aAvlSf*mtEFsP0ztE!={t~0s-CuC63>v z!@}E32m@<3St<}ZPbOC1=;CzGnYe(LY3jQ{Mw)>GxP(wSXuXMuT*CzH652uh$5G{R$>M?k{8Ty?~-vVd-2dm!Lp1BmCzr zT$Q6?;iSz#5}H=7LV8Op&7o|?rYU2vr!%4ne*OIb!wdocZ;hNh0H6%Q{&0m!?yv## zqdcFQBKp9|=|mw{P-gC~B1`cJklvRchm#AN4ZsYRA)Q_ADTg6?fL-mBvtQ0Ks#?f- zqsIZnXnedmfE;+zvW7GHfu@ntZ8rb^|0-V*h~Npo$)}xhG{SeH=7`X;k5^W;R^KoG z@n6%RgtoNzw|o}8b!K1c7{cK1pxKAjyaBg(ENf5cTw_rd?;_+fNSw}ZVtRk zlIOu;J?s4*daQn$1r}hvZ~q0v9!0kz5j{;CrX0%v=JB{~n-hlo9BIePQXY9C(X$r0 zBtCpmvhW>d<-ZrsNQ9_sF`@#{HgUaefD=kYKz!xBK9$f&1{zXeyOP6mepTWF-nVDb zgB9_ARn>5cfX#)dzhfFN7AMpkhM9>~X9ylQ;q?h4hLBAX5wRmxcK^DN0}=f;K;=@| zmoX$7sj3w=HEvnf8WW*WG{}pdkl%yKXkGkg^@nA0W5c1mQ99Ehw;d}=I@k#yh#F8) zJK28_fN!bv-?QaJ^Kf6f7@7@ZmR)h6?q#iiKk4)sw^h|@rM|l9fB&7BVh#_)Oyx|& z;sw&dx+|$vRSZZJqtNdNNd}RQs+doz8fs;^d2GQ_X=2Y6p@9f@hX=TR z=QVEozW>=q6aw-dE0Bo#I{?PJ|2O3qWe=o-ry*(X{J;bL*x-ljNrP%PBu?E2?&k4Z#7c>%lo3)}J;B(nCMW`Af$cG!mIKo9iD%h{Jx2x=i( z_)8G21rw15chcQufOur*O6RDd;19jLIc6cT4pov2Y?EbCK`~XhH-oT^T5@CAP6*A0cKMT7yrqM zOH6uFGpA1W{s7e>E2MZlQyQG03>ypJcVig8{1BoD5mbai>@k9IU;!HICN}mn>YMb2x^)xSTja^RGajxsn4@fweM+01Y@ed#6 z)zE!d1e{?4m;5XuhEIW+yC~CWx?ms|d%OhzyX|Ly0nP5*`ZWoal1rTvS8k_-aeOiX z=Z8QZvgB2tjxg+(#b3+nVs8u~egeXeWWH*6MGDlaV??m+eK~h58i{HKw_(63zwub<|>(zNiu2*@9@JKim z(>Hdv-~avyd;!P8L(Ko~7%V%;XQ))`7G|ssf157f)7SrR_8hf)PIVc3zh%)T2Lk^$ zmpj}9ZC?GIDS?nXUvL2u9mggbhc3j;d#@XXSq4wK#HGZormqkILwwnfdXWwTwwiSK zdiwnar_Al*`BrKFKtKQ(7Yu`DHpaSdaR=MXq8*%dj5NdCf&@Ba4u)D|v7l?;e?ZTN z<6$=<0CG%bP<4f3B$yc)Hm`i9QBd-2AG|O%On9K;GOA(Fmc48J-~NKo*=8=lb{4No z*d8cB4%v$;JHLPb{fDLr?pvyFwfOj>cs7S@@Fn&hChPzt4`j@Aqt77&+|wLrR4+nR 
z0wTYA+Sug@-a&iCH4P>EuzJahetaBKHfEw+o*RH53pl@ctZt4on+N_2|WHq3P+gg^}5PGbS6rII};@ zu49{SknH=t*=m!1#{d81RwFMzQ0-xn&2QyrBf2CJFkYpN!2kfim?1xOo$D4l)&Kwp zP$A(21~|Aw|N86i{r!I74aa}yU1^ORXPNy;z~5^K>)HI7`xb4M>P~_#Jsq3 z^~)}%5Pzkc;kLt;B5g`8{yMk z*Jt$|;R!nH^^u#+EpqggBCtdk5)tMkVo|Qe6hayjzPiv^Z?-3-SXmTmK8xgmhtPSJ zi!L|C2N%;0{3&Y*#|MDd`|s+xf>yrlzHq4p(D6YbsQ}wDl<;TF|JisBvRYpH0^kb6 zgftSEg zG#(2M8yFG^j~@vP5yB^arXl#OwbD)8cby4l*kEX=D>({I91MbiV9NpYQD3;>8D8ir zFb~BF2Y|8?AY5b0NF9he;}4$H5;GnB9kEa{xTrc60R|BT*S`ajE*zY1z++{D^j;6? zm(D`bWe}`zh1|^tTr%`0)t_`9^|wxMbb{hUiS9>gZ{w)NOH4Y{zCM8K4pPqw62f0; zul`=Yj0dx6z;zh9UC98U0EHGH2A~404FDP6M z6-N%tsy6}Q0f`O1-rh`nb}P&A!o->|U`0Rge>ums{cGnj5o{eMOJ1cwlwkwX-~113 z(}UhY*O<7z9<$@iB@ygg=6D6WBZuhX3Lseq^DS91F4iW2VE!q^~?nbIW(-tRu?C@HmlN<`@Y_kymV@uzw+4XYe}hCiS z(V?=#5a7iL;No0mV!j6}Uo(So+aK z^waDAMpX)UOHA29;`hDA&JX#UQe~GH2dIAM!ti}hF40uNS`bkuLkJAX8JwD~q=D&dM3@&oM_oKXk>EeC zl;jgI_5WLN-SjRTjJcj(UcsOazEpB-hqOCz`niOd< z*g;}C27%IG*gYWz&_2C;(+R?mP*E+ImLane@0DC~A7wSnVK6*^d@M&a?_rd-veoE_ zRZ8HohY1~Cv@!tj{`q7f)d(qj`|`dZ#e;wf_y-d)_2ujjAc$3h05iHD>ULq%>9I?I zqU-z5RZCTrh0ia9g8{>qa7-f}rQR>}RrxN|4o%hR9Pzh`%9C!@E*@1I7kz)8Hb~*A zzK4VGSql5}bPNNR!G`7~tC%H5MtFjRBb3=yM|$r^STH0+%-jM)Tc9{T#_oK#Q0XfI zPJoOl(Lvo!qHst1nCy_2c*3q$3~f>cVn$8q1ux4$@N8~nN*eW8G653CbEkX7;2`ka zKKwn8S$-NHOezUML+&dV75O{*WWU$075x=U5zxCFY!ex`+y-j+Xf04q3Y7X z7|&cHJV(!q_vOBvh{M2HfcF{S2Bw&UTSlMm`|PMzdkJ^&OY)lrz0Z@#XRix_(L1~6 z_y7P5iy`6&1~|A$fB*eL6A$cP*Y^$<;f^QzsG%H|{}l{>!qr*|5fWG+2nzCM3(04K z=y)l1~dFZCK^BsV|M>n&A5n(^otkYW98+onaX&Kdbh$a z^pTqvhf(Yts-NRQjJ{XF1%w}IC?M!5A{WddH8yXD2re05rCL0RGHg^ypz3O|F?Y~g z9~6bd-uxnLJij!{1i=q!=B^P9n-z-RZ@er-7*-?laJo0Mutd)wW}Q7>zGoy54rMkW zNi*FmO@i0l51Ngxytk!WFhM2{d`%wx_J1dvdLY7R8V9pyT{o@+QIJ<$3}Ogft>DCj zJlV^p1YklzrVW7rc*aP=-r_{tN9sA(pli8sa zYEj}`*zWTRam2^P&E-o?3L;>DSmKBrL&E~WCB_)f*hDpmZ^q)MbO}sQLO8fPP!tj~ zY}wd6N@=jZNn=omv-SDrsScDC0~e23WCz0^vo%k`z~hGkYq{hH(8wi_JG? 
z4GUF3m~_eCt@nDSXpFUS*v?zHY+|@fa*qFJ2*noBj7{2bS0875rSE3)~=*^f*z-1h;#WMi+rP2d_1%j8?%i}E) z+DPSS$47C*C@D#g%N)8!dw=pXSLa|&ZXITcReHkZk6FG~=7tFxK_f(;=Bf_XM-i?%FK$RMli!$lW&zek5q%2L_ zP~Ads89TFBthOcgCH?N(5_bf(3rw~vNSe++vjEKv5?kHioibnFFbZjcB4c4Doqf5L z(p1vdvP$2l8xY*zCIfI0g)%b$uaA|i(;O88U;>o@ga9c}5daES2eG$4n8Ym1m!Opn zEwo>G5mJmra4*lL@MvW%k<7T9j1)gH778CtwD4GhPLJ{7PJ;jc=J<}LJ*0d=EFUA9 zpDklhqslRhIQljZ08#!+ch}ZS(IReYs(T+)%JQX@hZs1rOb`eK4K;|>Nb0k6M_X-L zW#*0!-OMajyZ18g`zgf>K@#OLyZctC=4?yNs!yXl^A2@_0DYT&3K9E#H~F+XI$LJ# zC;w;Yrr{Co^b+4Levn~xs54@YZX^vT9~(?fhF?vrJP~L4ychIZx8-Af{qu0*{j4nS zcfWw;HQ;f|K}^s-G``3USG(!aNwXo|tb5IOmV$p%&r>zDG2cxJUql012oX zgV6Xy5`kza#(a)d+ifx_L5lGv9X=V|*-4CPg6mIRWqcTeA3Phxu4`0kvP{VUQd0+| z`wbvC>r#f{H-vi^veo*an#5j%aPn&!_c9rwtwr>&H*S-?{3`lc_x2>kD8VMs>O$66U~ifM7w46WzsaiZJq;ciMivuZmM3rziU95%rinKlBpN_4}8e zk7t81n2MAKt${@cdtpmkhGgeW8pXOlEqaw<*|5gAzB18_?^f78QW48lv>7 zo&%=9r8#i_8obWjf9v@C(*@nsB_T2)*tVANg_QkqYSP+?09L<6*`$DEAX4XY#TF|S z<&+LQI~M7dIHOU_U_W0oogw4f1cK|Mre6dwXe$Xp6sWyNYP88+ljat8|D92&&CTbW z_s6QBas+z!-ORT|5fy=W^d*cUcfVm~>frl3%UHZm;pf>%UWcOlv<_GeunhPp^@(&H zSRJM8#(-zwiZv2G0hm6wYWkJ`2Tss$-KbST17Z4E!%qJ2A%_7bE^b(8?pwcqa-mH{ z3{bs4vT6GsezNZ0K{7$_^gryrTX0by!{ib`W(q-B6|(P&#IOdfn^;uUu78d$hE=y> z>J!|73y$5#L|4`W@D`;fAP6dR^F{wp2?pwb@ew7bCVbuhP4g&M1Y$KAxZJ%332v=;IjRHUqJ+wF4|}KUO@$5*gb>#CkMt5)DFF?2J#`-wO-fe zALIOiE?gLE5v|!srvLs3R1TT@kb2CSgpKKPiFCko0BHgSL}JQN2>1q!VFa|Mktv@a zNHJ=fb3Y-0>6m<Cxhip?Dts!W)tjV=bc5U$%6zho1YSS;lNHfLt)~f^{^7X=ROW^!8;Gh!^1)(^O3J6*ze%AwqKkLzq#q|{W9esGP zh$C7rcEpk7QP20QOpSy4?+Fkfq`$MqnzWVJi>LEFUX3XqvEtQ?3M@}3nn)hY?<5Cd z4h6mO#nxMPSyLdCs1_*iu3&dOU*oj4%8uw2qnJJKjp9rU)|x(h{+}&SkpLJF1bS2u zX)(d&9IXN2h^(E6NZJC!tB|&u-Oy>c{r}6eCBwy0)3Id$-IHkJ?|brN%!e=PL6hJi z7cmhu@F|A_fiHkwm`iVbwZi~U0P6}2kPsX?5tkAQoslWTtExKsKZzBUq#XY!oF5U( zy@cJmMp&un2rm%32e51&66X4U=qht!27$+X+=#a>nlKEPAPdsuge0+u-m#XB@K7!} zd<0OcAcCQTi%5rMG(yGyzKjqX=M3_`-Ac;i1@#pW757i8>%jGqf&m&dx7LRqTFYbv zL9yZu5Lg(iD;>FJIA+-Qb{+uzf>7zS_hcb2eMU;h)g2C>z7-6N%)-b$&>raQ)p@T8 zq7@+ok1+rLK`LDX8fg9?%pyWML?ih52(XU61OS zfcFf3V}B0jZrn#4&S;hO&RV4~5hza>4*@u24_Wx};Kpa%4&4F=?Coo&A_w=Xyq_v| zr6n-&FOR37@EFTS0fDG2uzsz33V`bbnmp0b9OS9JRajNiykQ zbwNSFNC?&azN-AZ#Nn;{;1d1+p8{d-e4wUK8|Bts?2eLm{hmmkEm@<=5)35aBk4304J8UqxfM=)Fk;jr4CzYu_nJveua?^ojL ze{>-D{-I7~GK+-5E6$2!v2tEK4g=RnDd>NWTzG^RhUWl1Bq0=^8%vC#OQ@D;a>xn@ z2s*+n_kc0Y-mdf|rs1PVKdC9ehMgui&d2++-U!Zyisc|^rhp>Gs>lHR;dV~}ds8!eY`EN_t)4S#H z6TmBh2ssHsm^}o9?w^YUK&Y?F!=stqy|Uh^*mNl5c3s{ONObv_?e+J?(3 zg;J;p|82a2(|x?Ib^Te+F$BjY8P5lQd-n``$nE+tPkiY*V?#M#Ud!n4w_jz|xa63evn&wL+vEXz$x{31ae{>6Lc@Z2aK98m$`Q-%URL9~o^ zQwJGNg9_T^~`BN^p-(;_w~K-000CyA>#}N zI^i(-{5J!E4UOXaL;VhZZe}l+N3Kj?T;37l65{=OLO<1fMjUEZCBBE#j7Yc;7zg7f zt_*DyRm=l8K;QCav3y@;9U?=A^3#9ad`DgDlPsU)$BuuO!e$SFkomL1J`b_p5%PYP zE3@4=RS&qiZ1_FqY8ZCob+Wnz6E)^jsxSN&_Z=J+^7Y|G1dkJwD3|8VPA6*9!H16~ zDJdSk|HhM-HU>g04w}J(u&kja(3si@AN)RDfRYu2Oyku2;kmR$KZqX&Gpq^4e5SaB{*tM7NJDGO@#a6{(c<`7_1ul z_ud}&5tr50ztApSSYJ5?f|%nk^wZN%5yp0kM?las+wY**K2_)bf`J^Q646#9fOYDp zW=?ImhgR%Y0brj)-~vPZZxgckvCM!j4<%Q+M(lFV{XSU z>d#d5)#E6rWq89{Fc|(5E{=@qcm9J47v2A%D(fcZD($Mc_~H2I0SzHWV`>=`Xte75 zWIF7Zh`htMIe2dV|Ai%h&l$DD49U4b+c8^6J!G#@CAR$<{YD(=0yo^y{Z8x(w@|ji z4!d!R?BUpAdyD)_LZMKoUj$adRuKBQp7_L#eG}eiSkS+)p>gfWWPE z{ez@+xdY-v7i?g-bl#&AF1T_O1KDTcrrT_BSK&v}I!cfi`n)g})1r=0C{mA$KbH1r z#PNnl*1^jQ&g}-Bl5CgHI{Nq5zP%B~$3g<2*TX`F8j6zA-{`~q#c8knU_r}P|1-}u zwHzRrJHsSLmJ9c>@L2O+6JN$7W)gN$4Z0Q|WCqK|iN156axm`vE zx}P=M?P#t($2Clyw6?R{36)SO1Oo0FZm2|K_&s&>!q}cb3FkU-x>n;7PyU`P%1*J`{poH;ujdZd=**nCfo;F_nzU; zcHH>M@vO{_f@hDr8PrUt)#hwUVM%!z+xvFP%BFRLtfsGk000W@A>|Ns5} 
zlX2cNaHoZTL)2%9%Ook`Ukg=mhFEHEYF_*}P|@L2d9vCU7k32d+V|8QSz08TwL!65 zE+Z=cr9q6j?HK}~U+|Ol&5{)F3IkI6ct)#)mkDY)XMN5kgzNh{IWu1Qj;g{~8%_jS z#5)zZ9)`BN_mgP6Ptd3c3!E=COgB~~{4-59c-qF1-MKFFj zq^o&G=q2J_zV|dS#}&=sBWmB3*uTJK8KiNZyk4W^z5RH*Kcmswi?zi}C1>;4zEFoT zCJ!-x&`0=@u_Jfd3@(-;=tXg+*g#x^gD&@7_40&n&K(<;-yJ!iDkyM;a7haE#U8t* zf`PbiC0N^Ae(JvKl#Cyiv_0BQ!i7F4yNfKN(i=7(l=}^Yuc4<2$k35P7y=&c;lt{< zuO+69Uu#fCX_76&iwIw37Wf#>n^4~ay0 zf>N*|!!u#$@F{GwtO&g=<_X5JMm`G&72Yk>9(u2Tmc<1|7)e2yZ{KSxb~z@YS&*h0 ziC99)yE+*)mMMf^z$x|@aCKGh68#-W@b{9D0FoO>G6KfxA(V|1_i7}TwGg9^_L^B_V;^(+R@mxs#U8-B_vp(NlB6!V? z^c5)c@4*jC*7yst+Xr-h-F9f3PnMLHzeb|p-~|c z%Z@isadBFeqP(_7DY_Ha+^Ta`*HuW)uhBDB(diFDb9#gP{@UnGI-+~0HZR;y zF$3@&O?GD49CIc`bGPLBivB!KD;0{xVz`$)R{YAn#Tu8{E-BFaAL0ab?0`|aQY5Q1&%b%W&GYDT!9FO{0<9|kT?{0qtAAV_}nM2tX^Vp0XGy} z`3oLU;T=q*U@a+wi0e_oFFYN9EW|Rcq2L3{nZw7%Dq|i_@W-IEaRSc=3t*p(-v78T zAmB+)D=(F{&teK=#|s)HtnnsvrWIe~khA5%-*nA>UN!8cuMMd+2>bx8>%9s8=h{0I z*&GgKH=(Hnz)akTg@&W)4ETqsmQKWd;Q5>r0&x8x?`4@6_6ZymjR z3Rpq`J&Kzbgo7SP+AYdkP;&+0L(fpM8?2om?+Z+TcC5`VPyhtK$ExSDCd=^R7hb<# z$ZfwVJgB@wr@f5GH7PVa-}&(dGk4{GeiC5>SJy$&*zf54Cg&x6F1iSCnn(KzUs75( zcvNV7Sn0tU$gpQ6H1mMLzN5AT)i09Q-~zPT6t_XTkwD*cT3S(;6~Tj zat3RJzl?d%mAnnWHGq}_JZf=>h-y89iTGan@ghOi#G5pO=>l88@ca(zwYo3XV*>bv z&JrBpa9LXAl|~LO+MEXxFAn<~3dil2b~zJYzlkSjuyw*qhqB!klRs=$Ilfl+6~pxm z#r6-u3Tr88LLn`%&{SGchU1r2Ih@}wo+yXmjftQ}v5U^hKmS1dx?NF4J44Ov37O7x z8;f?VdmV6)*gx?+e)I?fV4yxvVfnWIFp7VXw@YN=yY2HD9Ah+27^tsj zOTuv!4Fk|JlJ^j42Am>>tkuw{czFD(DtD_B%#Dk4K7dfZ@CDa=<&*NBQ61@16_?Qifsio+i?q zDfWkOaN00q--!kzoje-!{=c?gCoGXe^a@2XDfB6 z1R(GuEK!94opQKmU5Mx!xz+z+0A4{LsPCpN@7i{6_FI0Id~rcy1%n0E`|USJ^}M;kQlq+g5UDG zx9*q3=k+Ep^uILWqY%M}3l3b?(YE{VN271@mDK2w(3`};PL+gTqQ1Y%|!FVL3M@M|UlDmtUOW--5vZV^jC=SiT zk4`j=Ja9S$BX&LV`#gBDF#$LrX?f@V-dDws2n8c%Vhp(Tk07a7q2)g#!su9&zxb>j zQ2Nh&Dc1C`IOr4Gmef~OfB*msW+CMi1vtD1(;qf?z#lz-`i|5f zt`c9kN>4Y!cfdRGVdDsH5aMGQ?zaeU$q6nK@Ml%RN?7DC)jwE-Z#GN*0UHNF5e3Z5 zIm^b1lhB`30G8fgBuS>N1xxHEC$9%dFLXcex=$PnB zDmS-`42*#7j|?$TIxHGCZISLbvPF-={`m0%dk@2d$emXhq%PI~k%xW&kOJbp3L>N5 zwkm0=J5|zM`U|xa^mnDPjA2_hx30WhAE0WEt2KwW*trY+1f$E-)m<@#ZC{xnb_K-bFmzT9dYv8M-*B@E`Ttxe{& z@Qgi7ji1@r_OKYh7vX$3o}xvT(g{dC(R=Z#h=E6-aw-Dd1_F8Qm!?!+g`6l($&a`C zR^}Ecyx9S`f)+z#p6&OVZvd^Dm+IC6!I$*$9cW*^ml~+9 ztmm{Pow&kPQ4QuwG0)f$m|`)>O%PrfKc=PyEp80{=Tr+mddQhqE{aP*0p3Im4|we! 
z>f3s~1jZ<;FAO&t*E6#1NP;8>W$>qwfbXU?@XDF#3UM|I_~pv$*U7?V2?ZVc{wObD zSZ{xhq-SH-QrXGdtUg>r$wm)oZI1 zKjK6M*0GjxPATuLs$Pc)r}`az!BgKit!nv%;}F9@h1`{@UdM&SF%eigzRDV~e^hLL z@i#z)$)VHC*;CQPlg{XHXF$DD3An&(Qc$_Vs8_qw(Tosc@9vgf@Y)H$ni>b7fM2*X zsX=%ULzOX68b|WCfM7s)08_wX28N0j1fWkI9YG(&a6BEdS|bW~$OBkaR&y4P!X%SL zXmfq1IgJR@m%WUk9eKa8AutOl7XpNFoMZHmTbWQqS4@{AFZH`Tgc-Nn1iN5hpP5c# ziFscMgLCCE1led{y4J?}@Wu$A2(EfiJp*Ac^6OLqlVB~?1R1H?=Ej&jDX;#*VQeN8 zDgXY$r{ewd;qwy+tNXruYF*J<8Z1ho#calO6AQsnN#m(5^fzWOU>JwspWwP=QZ2Q1FsekJpkrwFh``AoqM8h=Srx36PJ-|k`y7>M1bk2e3 zNXQYgDg}~wN2k4OQ4IiP#(|>#uoe6?f-P*yxc;fgaQ|LPGb*%OCnX5nkTz?w*#$2U zAj@D7lxCj$gOX>N%LKsC*>qt42dHK6atZ?ghzQ{TMhQVNUP!2mxsiIm9um-j(3tZ& zcv?Y0+SMcLD3m_#i>*iuROSXO%R7BY+c{0FMAr z#SS5`AAl3Gpz2;QSH_O22YbiY(6^1t?mfRRk7E)T6&Mif23Fzs`OTG6V8C!w7zl<7 z1f_AXN3{+g?I~FcVeO8g=;PxIG#&3GL?%ijwv&IZ$O(zkqMdu~X`}h_F*mk{-m{59 zUpIyx*v*&WshJ$8oGFK4ew?<~7)N~h;Q|_8ZqpiH5?7f;f(?l;FQtdMUX7Fzk6}DD z5e1nu&nxaH+iSbM96T_hSlZiy@) z(PLW04G;X|vqqNd@9YRbti!@(2OtH*h>0p^L-txTo-~3_`-=oXqx8v$UUYi+|(T*m3Dqi0o&`4c$1Atr4a@ddf|6wx%kwR;yu#gA?uOh=tNh{!SGp3|mkb`fx>&?W3 z07LdBp|VfZbuM@ZEv62yH}N75@^uGgC@bauwIhE*lat6}grNq3`#;z#Bs7M+4)Vt5 zYmv`>==uu=M1GV%o)!_Eveo>%v;2ZcFwS%lUKJTUCbx8S8O`$6Q;|@+LtngMk&Cy) zBl|)b|ICPFbao#%ysq&0@d09>QVW7X=-vmujv_yq*4OCjZXFTbJD1mf8fT=D`Zf3^ zQ3JjCR)Ys5J)|w~;9-c?dmec6k@@P2a5(Q z9iK4&|3ZnQ^AK;Y5*tfE{H^XUVjuCZWYl-tZsJ9j7^MB26%eN}J*2Ox1 zl<(!Oqg8A{cZX7JGGg*SDo${r1OV9p#F#)_SPk$D2y9qVO;I4Yg?A%Y92%GA*Gneg zmQ6awQL?f=FJ)#-^aV0s>c3^^jL#(KJR`7>!PLYjqu4|IGFtR&@OFgL&lo4lm$ZVL z)-2F}L5mn_u(D~UU>79P_5PkIi~H?r6eKarWxkESx9s_Q(BmAzQ6W)OI>u9cBrJZx z&gPb0pA(Tik(Z15RDy;rJ|5M;`$qcbCQ%C?L_&Bk~xCg?VP5yJev2I6Ak;X%O!!~O?E z^gAsv4H%;78fYVec64+x(XD@S6nKw={!^BgAgs4TVawf+g+w8cPeaaw%HDfmw0NLc zI3PGED+d|wQ#QXB@jLNXbKhkA+4CKhO= zw|;Zq2zc#NHihLWz4au40cAyEP*n)=N4T#E4joD({j6{7ScxyxA(*Q`whUuN4VK`N zJhQ20%U(1bgu&=VV5;wh!O*BZGnw#vQ{y)*T)hqz(p_i|4~Y#w;Zk%{Db&k?DIyvtKWpB^%Kp_@68#w&Fa3{MjG*lTD<&)N?T%4Lr{u=_PY23+H+x42Y{mSq~nRe z;m{ST5;qoW@FtjGl!5*}8F{3fGQv`4v*k-3Kv>FA0>(BZVFSPD6MjFOTvTOVDxb@# zM39b|1`c2ju@0lh?iCW5-e3AgdI^C`dJu`bWe|t(st|BS^0~)y40t<`K<~7Den+qD zQ3_+4nNJR`x|_@&|IkgwlRgJ-gzZu2EVr+(S`{3@AZ8MrhCo)7a+pz*`hN+ZeJ-dp zFi$|>y-}BzUf`?JrTpFLU9744# zYT6eOFt#ww4kN+a;O&MBoeP_ttoc>$gpHZp7iGi)c&9|2@|m;uHF%(tQbybPckt`G zEH$!W&hCAi+4&MK_M~0C1WpqOhzV20V!zPUvD?H%`p`xtyxhN|h#=t<{k!N+Umg<( z1%#ML6TwfR;EWvyyB_+`5CJEJBc=0uY!I*^kVF_MN0fS%iUU-^-J|G`7@NPgZyzSg zsg1>98~E#qP-knfY#w6LWfp=CVTb?W>j(XCKSWGPgV;6?_lNq-JwkuzB?qEh4xoA$ ze%m`i31#-mq!VLvbNUlkqVa3YG$xVRv&Nc$6n=H}~(PX2C& zpb*i-NFoE1%dy39aFfL@D+;KOPaM4;n=o@3xEKzmf#$ppDuL1?FmhnTmLWorf=!S( zktH(N4mTzM z22WS=^c96t+)%`CD7ABZCmt3ve=hRyBwZ9`gC>%?Z`6+csgU6` zEogJG{8O1s%4SnCnY8$SVLWsAg+knjV}vVIAy>=SMd3f0M|o`^%qtE$78Hy)v3l(Q z!TFDygo@z#IzkvYmjTP6hYPjqN|51xJb7h{psM&i(gV{;Sye$;G-m^lR2hz#0#*TZ z8vz~76(qWzdz^Hwb^Wvn{x=s0ar|_tgY6HJ2mliqef6xu0D=%Pc}pmQ*IV7e?pIwL z9iDfjFk%=c;;~x}uPzn$3fnG^gj#eO7#Kkh+X8iK+B~@O{`KzyM7^Ak1dLZf4-cmq za*aa`c2ueWlEjfA1Q5m;kfKD_*>GN>E8wdRWUw9w{a;dn5x2l&c4x8qZh;Adqn2=9 zA7TT_t3E$rLLSv(@hWVpGzTi0|4n2akxW>S+#j&^BWNyXuZnTR&WhhR|8I-N4~EH* zay7|{IHn+bYQN)?zA8Li9e-#HlgGOrMt_5=&5MpHcPQ9*i{b&#hzl>p1%t=59srt# zfFT2E)6@4GI6TE8FxZOZ<^RFka;-*!00>|b15ByoR^qgX&)SlT3$S5n{JAz&>tv zO#9djT}XI+$^-Hv3DrIzx4aUue4dwDAEb^Q_sc0FF#j*(;~ zq1Q9fwTg#}v*<3}hu^)4?4XHYkVs5&5HMSN1m4H(mtjiVSbRQ$;L)PyFzQ!jHuZSrpB()1GKsUbOFN&ms7 zKjpf|&#f_AI7{8p4!7WH#_%__F~wa3Ml)yp#ne56VC74f=+_{+A4BV}H@8oh!O~+m%;q7Sg0kvlOlq`6F{d_uT8X8L{=oD;FW0K>HZRO`Vl+%{eW^>63*z| z0<0z>ML???D46|3#t64T?7ABJf5iD+!y&}%Gj@Rq7{7cWO*7mg6_=q5O$^2K~I{L(w z{oOfyf&wD=U_G1L`lCtyGQGlmA`AZSm3vVLdlbkqpZgnrVgCQnTTO!p0pyb*15QQ5 z{{5ov$E1uw 
zWfksN|B;80CjM5PRHO@%fTjX9#$a19umd=T7>b)2f!syTs=m1~McATmRbA=p+7Aez z?P;Px_4pmFDU_~Ki4w4U$b~2$Rp9qndnmZ=W?3SSpR)ivk!gDl!GXfu8L5aJF&}T3 ziI;)yS>y#-cCB9TK^Fwec}sKLL%yb{M?St1^F)nex7}UvK4Xepjm)8A5SIhC-Gh%*;U3ip3ic zfLFWu(m!`^{q<5!Ci*U{>U1n>g^2=tC=7np!?uIR%MOdsCsaN*4FR#1OG6S~f{~RY z4T8aIfiZ3mME?M}dKnP{D+H`KL1zcA+A#%)4(zQhwK$NrR_iPmixdO^pdbwbOjCwO zVguP$!}o>DzM}Ie<7{5$T?^|Hy0eDCOr#k@UELoZjy7B@h0>ZTLF49crC;000MSA?F+gxVgNG{=ZR*q6u*XxW)ei`A5y3GO9A) zxQy3&`t@878FP4!bP@IGpYN9*DBYBvre1DRX{*rbg>M|HF;}m~)p3NVL&X!moJs`M z{ok_fl7yxZfuuw4m)d6yhW!b8AC?&}-(-BloN@b`+3WtlC!l#H(5P6-3@<|V4`A3n zBl1*4VBG^i^mdxsQ1Qh%Uf#wH?*8|8ce=-GUi-Ya>dNwp=BPPzv%UxKtt2@FhjtHc z=ZGHV&etzy4oB-Cl6H-e8!u%Dy{lh+n1yD0_bD`&2(FjNUHuf@3(S7K8DSsCdi0Q4 zX8*_VKpDzGM-jY7m++WKLvdb>kXUjG4;%pe!#F@k4L=SD*Vl>h`{nmV()?QMVr45O z{IG+K-0%18ishx<-)7`6r)gDuyZG?LjfvoJk*Q{R%&~P4$?V?7lb2CtVN3m_y;OVn zz`RK2TQ~uwt;O}?MsEtN$D+7|2E@Qa5ukChsWT1Ho5h@kE6q&-3IOPe*>wK}=;bj* zmw@i(9Phm*yVVPUxDc7!=lUIFFev(%=N!1kUoSn7N(Zl7zw*78sXNj6;)Nlh*L?{g z3@Ezpl34UQ7GG|yUFGir7TdP{4Go`q{rO$P@>D-(#*Dyr?LP4Va|w1RQI+rS_uQg% z`iU*}GJEe$3?K$DWq#tjcJ*DP6jd9Ux-9?H!W8~ z^@fAfy&Flp1P+c!2b~A$stzmfykLnZm zuP{^p=qJ2xipq>?=payv9KbQ7yBr=hc7n|} z@p|aS^~IpPIyWG|0P9GV4d!XVMKulA=7*+YKk~bOU3X^z01`FaWj!H_2ixRXWrLvG{{`&C($&{nCw{7bkrV6*`q zwvsSWbrqi})WRymjtwFagJ#sMR;O$oUUedykdbO%vHB6jU7Mo~XG|mY>fG7Hd=Ij0 zABtX`gYw;yas~OufDi-*NGh}7ciKl{2?6J*`YR0GpY2G=EbC>e6Noa*O3SKU6D( zIO+7h-AJnB%4fgt%?`{Y5P{sT{JsW&JO;`dMlegP4gyls^4*$xw4{pD0@=653l+Ax zVE@*3?Qanxx69v}DGroYk5dWpQ6qHdytiEm;33@`ux6Etk7ekz7dS?1URt6wq6*6( z;r93g{m%Oki80(2u|C8Oy@Dl@SL8sc@9)^txodDv4c%D- zP!{4KaI3DyNY1(W3v&Kvh_ihwx*^^r^SUpqSpQ3E42Ud5*#aIekT$&DRIeh-XA2ZOgg~}qFV&7awqvL(BtZB%AEJ2edQskEZnjr@QLft- z9vD)@f`)|Yd6$2`Ro1x`0rI<>zBZ8ZBcl|mAKU0bR*Jq~{!wq2xXq83%H;^c5q%!b zddpPG+ewn}{YiRi(>z?o(D;`!55n+8lrbQV{`q8Ci0Z-;JNb$)&{}&kEMcL(vK_j9 zi-OPK>G=xn6|`{rXpAZ}E)Y0<5LFTAAPi>=5oU3+t|WQQjJ54lk_tA*)*z(1^Z!1az5;!a~--?|ECu3s0;F$L`Qd`WyQ3UQ#A!~i(zZq=LTr$I>YP7pszPWu&L-0=nM_Mzh(&O!bzBmMMyay&?x8qPmI)vj}*?wMS9539U#YL8}I-C z3K{1{Y(xTA4rH<^rKE;yGQPx0k08|i*Dyf>v-$FCyd4HbHgFBD^A z*>gpkp$6VV8y*uLFjs7t2rcTo(02`VJY|ACn{<4hrZ+ycaY~>5xU8P9^eu6SohoiJ zP9Ey`f^UsB$EUH|7;=~th;TPPtzFpvwEe^ho)q2kh+3X2+s&QC^cSvHKfRR&?@c2D z=-45*B;m$c)U8JX(Wda((!x`A*BFKGmj8>R<|JZKh_xxE&P&(77(^IafWGk!_zVn1 zWzw%4fEnju!rb2525Q|&P!m} zGz|mnL<~^^Z1Y*Kz^xEdUJtJa*PtnALF27s;!h%SCrL1t1ycyHV0PLdG%VlL#FuNwC=F8-Y|3oe&qA2`#q1dZ42SfRmk2rL~g_RaJ)`<39KNt(Z*ck-LHI^!|MaEnVFRgzf*_8H7#GiIq=7|4sW1+J?(c&s!h77?8Q!R{C+^MF$IInM|cxZ#0_M?#*+^f+NO`iM~{y9 z_SxV-V&3U#Vi$ZcE`gwI8wP=(XUe7176?QkVG-XLOMc&yz^45-w<=9qkyc@lfRcD+*uQNqj(Dq|iMBK>3IN{5=mDXuLz^RWd6voaYIL z1}0{t@qendiT<<#$O23kC{>AkzvRv13In;p^Q1G%(e!7K$Bs*dp zdJatP*UTn9%>i{Zvx`tg|56-{dvmJY;K%7Y26vtK%L_XJ3X(xGmDIuEaWKh4z@lJ* zvSiObig;+XoY;T;hlP^hK9%?n=!pq1mcm3O#6DsF{=!q|zsO+_$N%B9gPW@lmb2Tu zv4X70i+7C~?Md~(z3^xD!e3Kz{GtkBhZHbOjDs#LOJfiZ?O-X_+Qe9vIEjArDn)gC zdGa&6irx8E#T^4iiVBlO3JjUHE6wT9O?KN`ki5)!gzwrFpYQZSZSyECMIqq|hZOjh znN)2DV@ZH9uVH>yPJloe>Kj*j;NL@FS>nW#QsmW$tEPhrwd*wmkc4YSVHSdL%A1e@ zL8-qFeq$eI!M{h#SbUhD!qxL;G2O-Ni2b9R0q)jhQCt)l;MHY{FN^D}$ON0C5)570 z;H{`%XYrg|7z-2X4S<@qUh-+%y34K!K!gmz@U|cq4G|ObaIvRV&lKbJWsd93z6bGc zTvK8gqz|L!3L%KO%J{kNVb4g<44sE0v6N{T_H-*y9{;Hfglk`>Yd~Y*b={q&RO~Je z0E!NRfPkQtC>Bn~wKSfz-~Gc$B71zwV>DmRsf0X~+)qj7L8C43)5`Zz^1Ibi6dTx* z>aF1@-|CjEZc(ApX<{P0mX3!!s+g&x(6$v7cq9qqnBrDy1-${z7#2Q6ls%P2?dxZIpN%!DPnH4xUGS z4aIKy>Vpe6{y%~M&I1PA)=c}w^O89%eXH-q@%FUyl4qU^5tIoJY_r+3RogI9l^RVs z7njm|)xRCtj=Q{KhccOf=}2r#+j#ebs`r79DK{zx+)VF$u5TlO3$qoZ+80;~FDSTp z4OI2KM(We)9MHv_Js9i{SSL5GJ`&$wEn@-5+c``aAj|!)N67mEViOsa)qZU39g6r- 
zfBB6tzp;GT*_!(=X3xvliiAnB;ig&EourUCZAS?X46A%g&&R-^DNjA^L&kC=b|!`W-`5)m$Vq{W|Nl3hn$MdVTYUWbAF}tT4G?^dnLffxRZS+QTyuZcRObx>J?YowXS|xmZMaNvGAMNB8 z4#mbqx?f=T;~^)|JFsjX;HhOJ&?NThy#hj~+3X@r5d3sJNoH*lI-OcUe`maO4kughYLIW5&5BJDR zsPS=whw>6<_2R{7_~?`C!Urxtn9J=oLY)gb9+E#vW|tC3ah05*IKIkeBw9n3@GQ!v zbU=>^YM7Typ`Y>*8Q)IW=D2a@PSgA1hvj(u`w>1!F`{6TE^bh08Yp1hk_mya|_&mv>$(uiX z%!B%O1*SnluNx977^aBgrEN9Nv=KY>3HJTz+|jZ)ViO1benT(uJZX++L7_YN#SThJ zt`5i#BnS%@Ly)1^4)?ViXlu{gkR4Hs7Wfm*CuzTa^<)SilAUh&+)wToD;M&Dq%{XO14)H`2U!@#iroVa) zyX}?i=O)Tcpn)L4=_zTKQgOv!-@eja=P1L9StR&fzv3~h_$dpTq7o&hYb(fGu>A-rF z?q2}uFd`Id=dW=|s3^6|hEQ)*FsfP_HXo)f<2jH6=p%q${gWa67{S&LrhCBB9Yvw! zeP6R*-!GC)iG=t1SiId|l)41~oYCIq;lf2lz#m@T>JN4QFOG>CODQ%#S0g@N#fc(d zfWW&Mp*xY6`@ zu+Tk$tZ6Sp4E6L6K-4TCgeNfQQx9X`EEpEJ!3Xq8`32B~OulGiOD}`8E)V%c2h2;~ z{oj+kkz*O#-W) zAFlBdh!x%xYPjXaY34bmq;N~?Ox^^qMC8m2rl1U1au!D@*RK!A5vqKA00hP+IZx3qw69RIt1(`Z=-!D~_H%>;&)0&+Dd# zy5baE?-&d^{JZMM^is?qIyrZ;CbbAI9V9nHc*Q?vlTBolx1Niu% zdtsK4r6xzv^gI9n2Pz@yBn7(R079SO^u{<3i^VUChR;ssyd>kY_>7b{Hw1cK8}|U* z^vb{ZPmE`P_SGd>e*^Aae1LrY3;_uPB|K!kM~!i_dIR$5z4LVMo8F!W(r^d2C6n(` z<5>P*4>xa!&LQVSfq3bJlYxXB7HY5cyi(${e)+oI2g5^t{|p3k8gy$=0Q~Q6itO}k zX76BLas;E5e<#G}*qx*$WROvNUpHTpO#C(f%@d(3my7k+<1xJdc&!1)ss>cM-@#v= zO(>M;U)cbcT{dG=FFrd;rRp1NYY6ejDi0Op6mA|>%u*p-k9Bh#%G*kI{V$E?#Fwjt zkO$jA-P@c-1-`_jE45p?BzbMOKelc~fKy^LzWyhBxXxd~T+~fh+V~(5VD_Q9x&^0T zI7nq#jRLCwF0auRHVy4lC-~VV`NGXViSRTu9SCe=v67|7&O1Fn$|sx|sWDJNuHV@4 zKbhL=n%0ZWnBVxH;viL2tc=ri@uEh#I8BL9y-iC06}Ju3Cu1Boc-h7Ey{HF z`?wTs{S5pgf}j29PYQ8$+-W^^wF4RS*o{(-IcIh9Z2uSUOX`ll1z307Q(#EfzHx(q z17FUywz}k;BOEpUSgxUlVE9LulOAf$S{C%hpD}yrSWj;8ai*!TIU_sIEu{c6>9G16 z)pJ8@MuLBe3}_jt9ccb7&}<&TuzW*SRO&Yytz2kF1HJLy-8FIM$RB^CwCTVHewQY% z-Xl=CqBgKN$LbE>(Rcc0c(}n-4A*^w2gIN}grV~KiHL%z!DS_p-A~+IGjMhw$45H) z1{nMPlH6m-sQ=hP;YK0EX~gsysbqKvdscUX0{8(b!IQ2b)oBtoeVV8d3z;j>4@>tKcK2za5 zW!>L>#~B^Ra^|D|&{<~uV^5&)TnC?fa&51_4r2ILl3Gb?L4G&W+T%j|JnzDw)k zy#u4~8z0g1cDT46BObw+mSRjA2LbRJ2gDFDTp}6RHV*r~EB&f;=nqu|0hVynO-~S# zDJSoz3%U}Fp zW2dp{j^wg8*vsDdqSk2H!DoV?$R4(3GboTU%*a^BxAb4i{zt2gG3_xM@ESkwC&?g+ zu1exMaG!Il>%7z-Z8*17d9 z!i1;~uND9(BC^msGVv=5z}AG&^EplDa!`a)?C5hHZ{3Swd&?-2meY8MV90) z%hr#KcvGK;D^GZH5^)A7oZ2?1RU9)+fUA!0mb}=AG>Hnh8bE*jJw6G8!Jpjeu)mcm z;xUTGyggIxi1X2r*GIWQSmG^xkW9b3`h0p{HxefHXin2QP8m-0Jr9sU<*(F5 zMj-stfY3#+G@)seqXjB`n8_HTZS>;*3Q#Zf`b$>EZUh8l-@sn}>2wiR+G*Tw`=|s zLHAE!f$|Dbh8eh}F0_%Hg5MbW@`eo_G;HSrq$`XqabIn-h!$;{s)q(%zI%#E2!(w9 zh#Mld&-4D0MBf`Sz`L%lCcAn$b3myDKgp^1z=ItEpiDDG!2nPnXZ|s7aLU_F4pwYw1kEl622e|YfxS+(|z|XP) z_a*D_u3pb83YA@7WzxH{rSUH zI+HfOf5&}{)HN`7)BoQx_4ob%*^E^G_);^1j?1h|{O_~)x1>(#FMluiDB29k@fUsH zKM`j0Jfj=BSAuAfbc6T%qy*i}v_iHdn3bF0000ZOA?hdvILHkHR9E#C|L!YsU;pJF zvy6MhL=vO4^{>Vl)(e^uAR&PE)|cyx@g^@F-;Eh3sINDWn^F_6mY|bQ>+7;+kBTKc zM|iG+@$+T!%ElRxvF!evL@2^+Ix?}p9#{OR5xqNCBd7VazqLUSyUqNlz8P|EKUd1S zI#TZ|w%6RnMleEvq0_ScmSPD=%eRK75kS0wk_+~*en@EHQfAruA*daL*5dMzdj&;F zIJz_z*V59j&`D56n@}}lM8-Q z4;6%h`k?jLNTiYHBywGVj3^sBz`+pqW045$An-1j zM0JwZUgF?m$H=B7Us-A4ppivvgHc8kzJRZm0A8`pJZMkE7Y}$!R5wvG?!W49zsu;V z&f;WRW$I6)X?Jl1raa~>X7Z3(a%H@Z@y6F^nc9|@D7FbpAuoXJ+t^9p&d$>L7)VEI zhhmqp5BnA;qo>#VX^;3o#bXE9KEaA0=Ksg&5DD7#oBtoML(U)|tIK7CdNb-f=;sPv z&Q<2cjvf|@3J?f$Gl$P@#dx-=DU#f?6zgIk5#ukr;!kGg#&|fd%cc;xL5G8g_`=0v zw%slY0IiZ{DO{nSb!FN&C@^614h|?>ci9lOZVmzC3Pm@3K=kzrDRG@2rm?b?E#=?E z0*Q$T?{URrL@~20?_o=|;%^$F=yBlSOF`h{kp~=%-w0#!0uLrqs6sHAb1-G2VjE2U z!LW6?v<{9GU;p&sMgRXnSgc6GNeqz){?}SmSVKQIeR#l_7ieK*ts|Ub*kQrjgGr)? 
zgogh34C|(oDh+jt9fVL_7OoCP)0Qw^bfiWt59`p z-+%}95)zIB-9A@+!P8e!%2nG^lUWINJR}$Mn1BDn?hd^91W}{!@@xvEERlhR{mm_y zATklTZPSYLG2bb^*zq*Rbl2LGIw*(yrxG;s?|i-p+*yht4Kzi&#qq!Ua5w1+5{*Ma z{HyDfgYL?C`3UK{_(WaiXG2S;uTxWPJiCk70I~ z9hsSlBHDk`=rE);M$sPtqhUP*@ElCQxKtkqF*P4gXRuZrAfTbfA>GZG^4!$04-j*| zS>|%phUXGRDF_fC$-(92hAy#hsJ?=mtk}5&Wqhd!im*5ek#a=TumB@0_^r~n6u8W- z^di1rDqFJ%v;3q$&_1ukD>#K5YoZmi{xB!7k>$HazA12Qe^z4x*8kVMQ@gv% z+NJV>5e9Ca5`1UgSWVy2S;%s+FmSK^35v_y4o4{R@b*~j!HE`YuR(>pYcOXDe9VPE z|4S~2Yk>bR1VjkKl3y|KIQt;u?2(axG>^fbDX_u`C^l1FtO*jZ2Dmjzi;yTcEsThmG$A|6?OU{h=pcJ6Y25XStSxIW@HkBu%f5{q^Q zi>D4gUxh~k9cx)$#)vIUB$pXxMjyBLU2Dlwu2ws_iotZU6ql zOdae$ZGA8t{Vn1Z#jXPeB5N9JfZ`_$^(ONJpX%MJ&+nf$fgvHhJWDck_z*Px2WL z_)ptU>JvYEb(kA3x8XR#0$y8~O}t09%HAhkctC|KT>CDw*ftL|Ag3ca zErVe2cOoheBy;F3g~bqJptlcs=_||)(wy6``sYD5pWt_;crROA&;B+v|CRT&{5|0# z1KPaH>_;tICHqDz|9tecA58(kJ{73JzOYo4U583#F3=(2SC9rG` zbs4GO;>v-bdIo{}gm1`5S&y>IWwPZ5QjrViAdj_R#SmwtSG?t|N5-MF!drv}16tqj z6hWASLA=i6(W}Fcfk#1^gZTpk8xy~wFF0Y*!55V~%2RskBt;C%=3fS#?l7!&Jd9;s z2@5iUu|?(Ev3WJ++>(mz0x(hFQJ+~zekmWjxPr6iB0G`%LLJDCIUKYYs6WIAVAwu` zVEw{pIWOsIGbsjv_vJOrN&FI)y8@0X8ct}90q0S4F6X)ap5g1C?qDdYWxAV}pw@U; z;q~)>ls>}$0l<#(G=OlslZyZW00rd1Oh(+1*{ciY_NXtQkxe^kVgVh#4 zgYrQ9mJi7ToRsbyC}wqE8i; z%P~*Ye3O!A_>CTcpnjq#8VAQs$RFd)4XF)5!@m9WKf!@;&15v@ocLmVctyr69&Oc z?G}o1mg*g!?CC6}mnpq~bFi0`gVFb+6wsGJGjPD&iT(U%VQQ^z5IL3(w}2hnsI0$b(cty;MQJ`-)c(Z~u%)dW?TCom5hj*Vt|_ z%BD~@jg!fhzQG7Yc*6Mj*v5+D_o|3D5#}&RM5Ge(-p9ln-%3e5-&|pkmBkvP1V0aT zxS<$@@9SjZ34!(e#j*J|b;K{N>$ZLGtb+1F-n$bj=$gp*!VAzok`Ld%TW}yyrJHf_ z{||9cdB6YR?s7~XcZj9t`G)WE-}of#dgK%%-3RDN3Mh2gQX``gLSUpyN_qTYf=Jg% zD)^%n^sp8Odk5^q&_2b{1X1?a+8B~a8Bl^fD>2Z~p4F(X*NH=eByhS3u8Lfls%tIR zCrgnl29nV*;Fo*fQhk*$KVPi@J#yNpL7~&J^bVhXRxW{{dIo{=>3sJ%C0TVmY66+T zs=-HK!0Mu&wBC+ceB!1!qOrthLv%(ne)WzqA6!8nPc5W4E(0h61Qi9rP*@}x0KNg3 zJcDrEGw@kbz-HL@eLyH3B^um2mNM`tw9|U0hZ3#M#QxE#PDjcgxXf91W6_0yB3w(< z0Vs_|Dh67j$Z!6`z(yYaXdHZWId_eYnTwQO0)wz$Waf^k*XTl^*9q^+OYwGW_J7Tq z^vuyl61{`49S2}?fCr{hp&h#&4{XJZFK8pzUi^q+MG6}afOhwz{%qU$PrdxA(+J5G z8X!zwSZ!586-fJ>f5ffyU8O=>XZL&ZMF`ULEz5s+@FzM8?h||W_3O!&>H{FQ7Cs>{ zhsCih{Rxf(M)SPTWZ^J?MwjnZ(3Xf0Tn+@J@TTl07Ho2Sw^^sT2Y?p%3!~LMToqHK zwo$Mib)s8Cvp6umz_o4{vP1h2C^7}X0)3D;tl*$1rL4;S<*_B~w=XgeiS@Dd4S0_c z@gA&Yf%=aZa|AZyREDyze{m%3c9fo%u_bS_`s}R8o|L|61m_$H^3vgO0;hln3!(mZ zR2E+tHq!>d>>0=?8V8_oA9e)NYFwzUvXhTLX~mx z-Ymf%eJ%XJl73S84#D4p?E(9>+Q9+m*Or*V-`K_VV1iFT(ytJO7M*l~AeIlma7Z_Y zLb2SEz6JN+*ue!vm`KO1`oz>8==oUFWQ3l^+WJU@9A-b*l6%VjNYilx2B)F0Xt7%p zm+kd3ZrgId#FPT`JO{)YjLfGevL`HxTT+(Qh^Z`N>>p*`s4Tuv$_M<`W4`!X#4wF^ z9j~yiAT$^S$$wVy0$r~ChG2jYin8EvQI`$?3k`Fq!Q$U1^B2}#!t(%Ja7e(wL489k z@6bH(mcE|D(jkb<6+)^*=Bf~RgYrcn;OQCc8wUz6|M`t{UToxx=-58GnA`GlyO}af z#Es4oD*YFMxoPHJ{v^w%r{@zw=v&b!hCBX+Iu!~~zbp19$hOZ4E?u<>46o)_+_>!ww zSoX@NQQ28Lbo(y$WG!illBymqk|v5N3EZ~GV=L6;i0E(L`hw^V)+B%R@tqL1Id77k zh3YBv13*kymaw3+n2d}Av;m0)fh`zB;`bJmE&ePguFF9` z-}T=&;>2upz3M3F(Vc#mEqFG2;Q>hcQG)%ypr{+P#vC26#Iq9mwbAH;&TBRES+B`2 zEj_1-~s>bmc7hns=cJy4pHk-U)T%BfC=)d zlXtw5Nx>}iEwr#4#c#7u?>U%i;<3X ztUE3ed6@qZ(R&uV4ez-0-_VfvtHb~4u=M;}-zs|64HCHQ?EaeGVjtJ%-t4k`9#6 z4#eW(`E37;*fwnQ&~2cEm`iONPL9ByP*fsfDp^~1#@TA)cQ)5NpO%*~cYFi754(%*gk{w$6C?81(T z2Tc$_6DW*okY~HIzwcf+>LMfmseVn9Cx5gBD8aRRsmZEu)lws&8U=%~AUN}2dB*&x zMZ|w+ZipUD2V+DVr$00nhv;ke2k8Z*g*7xj=ike!`D_%1I*1-~Uzo5$)UbZ3(Vk&z;^$&r?eu2CBo#Fs;he^VBR%xIWqiQ!tOBs7_T z9@A4e%;kYI236^nKY3M2zY^+r}9+ui}*#uUpID$=uT6 z5LD(2eSj2N1rQp9ptfyF_+SQ=?6lw;4HW#{mYx6^Zy!dSr0h{MjR{}$x3BNeO(Fh^ za`D55sh?TC5b^TDr3N!en4`Z#7BVR`i@fi4~aHrJRoAm#E1D!#>T7DxWX616}GUFF4u&RjraPN z0R8^LU*KmK!BtZD4@q~3*eT60CIS9Bz^U8)PtDu7;|lWl;k)?m?tq3-Z_AyIYjv 
z0z2-sgkM(ifJ=`I*T-aW3QfR<&wXsgiV6@ut1oLFwQAF$v)av=xB>hh$W$UD#>H2?Gv5g>nP56F!!gABx*wD=8oKI01Bptt;=Y)c>cI(wE} zZ$DA_pi5r+oX^eAzx^gWc_pwTZyQ{AJ7^YPveJ@bhA<3kz>%45ckzT5fLdGoNAE^< z>LqfJ!HxDtqnGbY`|-+iq=?FfiscC7-*jNS<^jmk>I&5Y?gl}yACR(9$G zZg9}`vkPmJjGBW{C@I&8kad_X)Pp6o$q?J8kSuo)DzMxeSS_`y`ztS9VmN=6%0WoT zLi^@GgSRa$KX?&WEzT;=ne_d2u7Wal|1H_AC1h?CTWs(!A3SO*)dk* zw`JKDAzS79pK|vqFors4+{pAiPaAK{Q!?gLYZyk)*c4C|L-=}{F(Oq_PCy2%15FQ#0mv*(TKiRY-I`|7`W_a zw*pvSWMt+3wZg&)%E$a(iWcy*zE@KKusm~VO$r9U5&+#>fKiIO({`ZFEs1@O+Eu!f zY;GJ;`Nf+@2yy`EKo`IKjQ!b}mc?Kgl`LKk9u=tkV-SNFVi^XzFZ7bq#@{;2rR>)- zV#6FXdOf3J`6xM})}Ukt37zB&TcSE>!G9MlMlA8F8VqLEDPJX8NopcD5!wU#Gv#dc4Bl z@3uEjWJOW+B^`Qecqe5FnwjkX)_0z`4Yl~5Z7gDA%Mf*NKkm?#4H_852*L5br%LBs z8bj$?DPclUA-zWq)Fy{+;maGRaEZ4a;ik-^1#4WP`|e2-@%SBg9RGun;nsISHORO zsV#^Kj$%ffBU?u2>nT4a?a#cVE$o-|d-&8wtNugTyQ?Nn9S6_TIxi1{V9QJVeSTno zqivIDyGUpZQAe4NvEqs3_3NM7Adc0kAv4sG_vdv>-%){A*%!)D&xW-iYCfcHCLLs% zZ5!$9xv0S|RTBR#01;dRUJChp4))#f4gxZ_S1p;z8DT)GbulAxP`@&-y3oRFr-!+r zc~2pi%P^!Do03GN<5fFY-xdzKcTxfpNns_NT*S)#d4=^e|X72Oyb>#IR4m zef__L`3I0Kep|wbenRtLy8+%s?8>-b`?6%$6cib6`Fk|pv5vKHo4a2d;+8+D(RCLJijdPW`_W$ih zdNWfEkxt$dSbWW&0~5yT6muz;Z7ztH3N-ki03T^HbWMLe0lHsi(*udv;K#^9kU%}1 z=iH^9=eVxpIKOV1rYI<-xUMTuG5slelqLkp?ge69#Zap8fgTR^A(V3$potRZQ1NIo zzsXV5CK+PHf!vt`y+kfX$$9cqEDolni;oL+xkDb7MlV404FjNZ>1sM&0i`&-10f>j zf$iO-t(co#8Xt$dywRnpJlPe8Fm~cYg*sr&k}3eB?1LcVuL#}o`TEymfnUg2)7ha# zOvaY=2}|rOI~UAE#B-2e6$ng%GfqFCXdX5%pnU^C;uqrit)79PelL`fNmT<$7xZjX zFpkefF?1t@5174&MG_&5Th*&sJ*&OhZ1AxKQXw>oxcWog_;3GX-+o3%qUGu!comSD zu@)`8R~WNTKqF%$lCsk*Gwe5df1@oT&|c=YtN3y0t*$|DUQVEkw9*Osed5=f547+fkj4MZ)h8fr#z92(Iwor&e zr?>LeHIeC%K+wn$uU+!pRPO6cjGvDhxARhXP zbN^XwJ6xCCLHpDCX}@y+6zZz0%I+0jH{+n|^lZMLVQ4!3WwZI7fuLyt*ae_v{1CMJ zYh16gf8|R&VFehH7yzw+86l!JbDK`ix)MI(v0lG{+3Cuu;I!h13Qho7Fbv;`6P|GI zH{qX6e%Z+sB7yq{=p=H`XWl~HRx3-R`ewh3Hx!B_^D!Q*Yb=tl%gvc>#_imbLR2yR zIA?cxZ+RbN9+~}s!pJ?t4ZzF&Rh5q6xdI1fV+<=U*@F~1)>n9!?fxodAD|_s}MSced}z_Vx)p%sH4a zF}Ij9M`_d-8EhOz%&JrwU5g(K51&5(*+>aAww+N2Y##%((ekmdt8zyg96|Z6!LWWJ z{R71GHHtzz2Eptb2c9F^Mva4D>F7ouO3$7h41hIh*IQmvL3vBJtnmup7z*A7P`<6y zBx%w7?Q4Hdd4nD0w@iC85^@w#Mg&9}2;c{~Jpx_us#8r{BxQhmM7{FHce<79S?Y~u z_(ph%dyPD|N_H^jinJaQR4G$vC_V^Yz|FEb@Y)0KuwKxUgV)Tfw*KNl0eOp!KeGFM zEixT1XSn(=D-F&VZJu8%f-$2;!hO`{q+u)Z0iKvW;0y1bm^&)AYGhh!&Nw@O%1f#Q zQXnu=-6icg^&u~c|K+TywIW0jNdkBlyU)A7h8WpOT|l3ked9nrm3faYtz0ZJz3OU$ z-taaS{Z;QEglmkp&p9Ms$DV`q?62TzSSrgT0(af$=VRChT_GViGHHi%h{(%b%S(_#CjQR>&cF1Z%5%Gah)5F6yPW%pvrb=|i z20*P^n}RckxH?pPy0OuL)D{t@{tuCbGO^|QAg08>gB=BiL0buq*MsNuMeZvWcxFQM z=MEOb$``=}zq~1Nq&W&GKK=AxG}(&80t9?0JqCci77XQ|Ji42J6M2C+42n~Oa(Oqj z1e=#2Z_XP0Z?dIUm!q&t^uHJoZKbD>5U_l**b)Kw^`S8_plC!GRm}b{BhY9z4Tgyw z3AUyPEI$jqfqFXqe--hPD{Hq9e7%3h4G^N_e|8Nf9f80g8fl+*d7x|kXp=^qBT${n z*z0I}qGN6!@cqn$-u3T;19u(sBD=APTKefvv)^5b*x^7mJhE?S#T`@rU-1t@Xe65Ff~-@bg|EVBRck zj}H{fbPc*EFUfEu9m#6KHUD{gsMJCo4jUYx82v*2yQMR0YZW;^2b_@{Xq)fhuCn(3 zc0i;|G?q_%|3h_y;v?q_tQB@G{caN$DA3KryWjWO4w#VA90NiE2V2u21=!)iwp zI{*Oy(XS?m!`xCY{ucSq!wGZDM3I-YTUiGESW-e8 zyJE3olOj*$kyn+3+vR%IOcZFLQHt8yZU4#5`bRaPaHYBZu$QAoB#a;91D%_Y^KvOn2eX;Xtu$c>+L&odQ^bit6c(C8KRI`A5!RG)bl*QSz2)l~ zRm6vf=v$?FP}Z7VdqM?-GWMGu*Th0cRJbcv(y^eqw2C$_6o=8HGxOg~8V13$Cg_CF zTVPIzl!%7umYGP9zRv??#$q~r3egi$o^o3UU@S;9f-D;bjTkZ7(jQ|oTB_KDE5;ZO z^H5{cXT&}P2UU@?v9kRg;pY~pu8u3m^*7SD5?upGE{{XfX$p7rUmi_+-}t6_FpCcM zEWEdWCzeIi>$Sdxyuo@=WE5f!_ zLAJ{dZ~yicmF*bkCFnW{ibMp&v=R!fEguXSf7xGi63UEFUDumieab-t9-vjA_Ny<- zWA{Vee)8LidQzMySk$D19@z%AdmYaVvI;RQBy+FH>z6*HT|Cx7s1_`N5w&xYL@SuiuDNVVqn-b z3|KReVBkHKf;d9QJ3PdSlbadx|MV`w^AceAQ9(wU|I*S*oZrZQp_#l5AThglw3G*Q 
z<)yVr5Q!j|{9u6UUHb#}0~!u2BnD|uSne?YE7}x8VlCJ~fNMP?nv3}0Hb*K3KJ6Gu zXeP1Oh+h=!T?_KiytjV-)o~&cx*8piL`osIr)!q8lM=&XQwUmzrkJiej&v_gN+k4* z{*P)Qd`2PuQ(`PZCt22|YLjgZ95Hv=mYC%G-!fGKhhJBvtogcagNZW~egl&;Q$f|0 zyfhD7`3B1lkUb^0(S9f{>T~eOlAp<|@^83wNHytYe{}!4^n>ZnSM!u&0d|sgeW2(B z!(mb61P+kNqF;`}q?97*PXGkURYKnn5%;aL{%!Mvvmfy|C+*av5;6!Zm}zigyn+V^ z=#n^xV0H$AVR>%TaRnsvF@**SrtsoB^nIUHt+46tc41BbUkC_6bUP2s#H((|lYI*- z|1^BX>|698R-EgVnmDNC@kXgdj2S`~d5RooO`Q?3lbEb=uyL?`gY1>75vn^gGdmpu z>;IW){xZo!QNdsG{@B2^2(u|B+X*z#eKknQ$h)no{qqF z$b~Eu2Ep6!69p$#yazv*4a2uC`*CB2L;jJk^Skov+KT)4dJK+GqLSaf{x6fVq`3S{ zrYPaG3m(IhLr$6r!ViCN^A+Evwk$4Ow%Orw6d)8ug@hdA1b*5CZ$mk}br1+$#i9Le zs(qIDa2X@%@aO7KgZvkM=Ewi!4?Tm#E+cq_=Qv1S&7B`ij&95#^c%p%sO?Nhhnm5=9Mq?kr3^2S)*`U$=E zxxdH!D_b~mmi@>GSi1&dEXz*8yTYIUPxb@G;_m1fNU&@h2(XT6Y;s7=;Y11d@BfG% zE4D@?Y#0Dj6f1=YK*YZT*(%8mKE#|4tQ zGEI=*=L|+DL-1Zam8TUIh-mMx;~TMbK>d$Hj2ZuL*r`{_xWZupY+hhfBIr?Hor@o} z&_U18;Kr!bn9C&GKANdzU`~vS`KhI^FQrpu=q?g}3jMyT`YuA7V5cC;-O8pCD2$TI z3lyS1Pg*dV;L{hug+{~Z^HTrvpDN=HLJDm1{>q@H?(8KkQ=$ z&+!9Uv)QxlA|XNs_6eU8g@S2)*%Hg7J?VOv5+d#!v!&u+{)P(rbVLpQIBorduq((y z1m(p5d-TJohGZY$uy$etww;sDfxz8!QDTts+<}s@N8%6LGUI87sQNAcMJws}nsogN zw#Ohf^vzfn{*w9o?Xl;xPf=vpHVg;EZZ zd|n5rX>AU#)9Tmq(>5oRr}h&XiVmy~SU?9u%Nl5-q5eBI3a|a#x?$CX&|C&rVHG4% z!{A#te+;E+S|mEXG13wnWy7=P2dA{F=#Q@F&Hs!~!SeT{OZax=#=kAl!%#f~K>GMf zdtuqn!r&h4E*OHRYhiLH;gsM z%7+`Yg8Q;n=QsZzeF@{kD@vg4OdPy=lN$!JD7v35-a%=LrpAiL3}AIKSEg2`Q9H}S zFPDra-4R)@nSBOO)egn#Ju&@{=Kd|QedYcue?Vv#f@|bcGL}2;E+C`64SrkbH|75P zK!y!2f%zpSB1kblUNA_oV!`6UiwEc*wURZv==1c4XwIH?4#aMLii+FAuQd&wflSKd zmxf8>w_nAU^&J6?LH^`A`{oDZOX)JznQHF`;BinQ0iXb}z#JgF0kgFEWImyF>nkD2 zLDF?$^+Agh9*c8V^oAQ3xf4dUK2=OAK%f#rg)kbx3Z?!N;V#A z|0<}oRGEk%LD1|Q$JT7VT9F6Do=H0_4}J@nfB%p@-b*+C`U^nmZ2{G^4!p-v1o;FW zi5(@`zyHun!24*14kSf`9}Pa3T_hC> z2cmxo9YevoO(%>vCG7ywsjH~P8KOT=li4%t-QV_pm{m0|c1EI0MPeNVujVx-=7u~a zNepcY0G$VCfE#Bf9D;PLD_gaFRi(u26gMxI`q!b1+*6ZP$x>%xi5jX{gKlt3-XD8R ze~a;XtHv0HpFClMgyRQP zejD;|B-yk3%YVtT6I&k``z?t)g8PV)&^=S=%NMn2t4!$C_1gc(aMS+5n2uUMEO?D7 zl2H~6#22#=SUbXsJkg|5;?!G(gX}(S<+D7H?~7W#|V&r$?azK0NPGA zA`ukl0L5~_a%}Ahz565`hjgX=i1j(~GF2d%ZuOvnR^Q0_EJT=RG;c^ngh)r|_*9O+ z36q7;s7m%RXgCc6;NT&lFPPB;1V}6-K^=7Uan*_uq3JZmohXp0cR?mbR9LWpn{Po2 zf^`!kWE=7!3%=hZCx5=giBfVg()ML4{za{`HPY8R{22!}-Z95RyLW(@3 zkRt$ucT~1Sea_}_=D*A5m&Nq`=Kmk)E9~kGX0A8o zpno^Q1N?xSdZ2e;<*b=pJ{>X25Fv6z#emPJ)EO>Z?j=2c`S(XS-WDPRI2(ed(BHpV-v-9CDI)fXmunF+wCx-@e|$ zB1}OdDC>tFf6dZ`-Cu0K#Embp%iRn#_M*DX#2uK*>Ier8Axg>IlV!N^NtC=a~i1q!B(IM<~C$%$Ru zV;Kp~hN7JUKV>s45U&^jIyJjF}$ixZ&x9-gG7+TP-jg?OmqqmNwfm>!2kTtF_@ ztI&L!DynJJ%~yJk@#tlJI{JOO27#%vY=8bht8{ezgO}mh(c_^3d2HlYwJ|0Qb4Sp) z>rm~rUeQcDZ9}%z^nD^en4X1T@6F58x{ZXQ{ox*-ml`#vh!7M}Oc;%EIvOfw+lA}V z|Jr@JMQMDCg%C&RT^^4vm)|)MLK$mU`aPzxk#gTYY|nl#vuCqsjEnRS7-Yf^i{v2s zBws!tMq!9m^aK5cjKs7vq%HExATya8*cDqSOq^A=Mh)ZhdK-FTn#3EFaQ<^K!8{zt-`@=%9|l_sZ(@7>E$a7-JFwFU5QW8LRx| zRi1yM22+f^x_yY#>>p`162H--+>I`bKmX8rG4K1=-<62iJ%jo&1W(gNe4MpuUAG)| zC`%F`I<(7-RRoAUaWT0BQtsd7eUD!tgVzUE1FJb+#qVl^P=OFB3DQNmkE07d`RR&; zRA{>NY#Rr#Y#z|UmV^Bs!HRi{B^m^2a|CGo-tt(V6wQgSdTFO)WuEdixie>-ES_ADxEVi4W1)idqmMX4N zE`3O!XAoFfa>!{4BVL&8wucr)+r|f9^s9v356chw5cYahAE@CE*ftOBAB!Qc=cO{# zASJ^Lx21dsw(3KN=8$Bd_4@v=(KJ8bMZ7M45ap@~$LtfKB@G;Y8FjqC-WowQD73Fu zgJHbG|NRB0k@6eF&Aflz$q`0MZ7%*IN&)ri4YY@C3r%Cfk1!zXWvJn`9Y6kx1Qo`wIb!GBA$+0M|S9hCZ z1U>#Q(!l+!S*3jg%g~vw3==`CO!zZ%Z$n5vOo9ii^8BPy%a_Se>G5g}BVN`$J}JYD z3WGvvI2MMoN@H09w)F|ZaFb9$4GznZUVqpgUf6t+1^ifw z8-gW}aAZ}yYw+1lf(F_!aA5Lv5fE|CmepEAEu4`FQ<11D}4MpM4B+$!uvO z1dEpO0)z-hD3SyPLtWKooRLwI&^3WjVq;j_GO#OBy3$c(sGn$4>qF)B6LP|yI+#^2 
GIT binary patch payload (base85-encoded binary file data, not human-readable)
zU^oSMt5aE#P?UDZ$d=sVe*nW;*9&JE4yWwuv{F)!Me80+q0+4`|h4_bgR|U)f zia-$nWkUdiaAt~;V`*WUoU}xqxqFpS2tgzkXs!=@S1s+_BwBVk=h}t`jC3$;Yi~RU zi3bk>%BA_A{0J&y3sjobQ>w85Rfv+!6#b_6Ubhh5|!E=JQ&u*7>8y52*1$9{$^a>m#`L z5fIlvUW)OyTJl8xtDWySm&3mNykk8`euj$E@V-;Ctha1Y3{K%u;<1X^ahRXxOVQi+ zQnP5ZDflfy&~MJ1C-%Nxr)Cb6rVB8OS2y&2Eo@nB%5%&SCmT5Buwxr&Tw^Qs{$?z^ z2dg}adpT&|RZTD;L_y6!dZRsqVEoK{KD?vc2gpm?)Q~&v3!1~_@LWLrBnSlr0%Yc- zHZC$w8gi#-;GsrYN^)4pHQGSdwx)@#2Zz`2GGaJ|xgn&b7G9PZR=Il7WX8#@(RWX7 z9>--P0DEq)upf~F?XN+uXjL?sWTqeg=q2yChKT;eQcIfUrq&cE^cVl+(w3GF;8$<3 zd3Q%i8f^@ATJZTnVao;4f$uh+#~IZgB1pWpllgLBqZnRYDpku zC1J?o5Ank=w&z_hXUGU|`{1aF{)Qo95MQggmufVjHo|Ph9L5)|;16q@pVmeN^(?^3 z>%SN*6%n9XqFsMa2-J1i=pb5Xx;CsE27#b{F2S&98U|uUB(0$k%UBs(R#{IJZAOc~UY({9@Rx4u$y&cSN)a*;Rd|Y6d zcu|l;=G~Nr{I{WqVNgtHM0pTP;4@I9}5##3+@> zGSJ#t_7Q|xEATBL6b%ChaznWW&4VV`8p~tqwt6X7eIRnLLh%@RiE-S54#b9;9U$0A zLdNQ_nIK|X&;P$*=s?35i|+{-dAT^VL2@tU+#qNtX)sa;0fJcliVtkZQx+!mCx0n> zWL=+kPqr{tfs~ekgkG0b$C=$Ec{C1)=;SQ9G(+48LL}ypr-Ed|eAn;i4;l+px$$DiIBJNfjT><$kQj z!4P0-s`GvaRJb19bJ+iRnU$0Xj6mR0aAQPEWZoX|bvLsB^COZNF3rEAkJC(3*d}EH zK(|`omE9-Fbb=6EZQ?RNzHydyI2`&9IWLRs+3eZ)zJZ{627%50kJwkBVEluY*gRBU zMvpBBN>{$RqoIb_W5U3j{o}t!HK5ZFfNtm@;+~uyeeipgf479gV0T0{eR8|Q@0I|w(-W8O)d}#|-hBqT=dqvKho7?QG++dhM7aXGmYl{yB zuYjzQV<+%u0>=E*m9(NmVnn-ZvDm|Da%l?5p|-@Bp3fsRJKH_{8nJYeNf~i3gS}K% z2KGpx|I?V|ulWhum0c%oN>FftX<6|lhe2ErBp(pRMS^As&KJ;lmzg6(vrm7$w1^9p z^=o4<@*Gj2XO6Fqcij7LYIkl|a$h5C8AnGtl*+@@=pDt4>Ow~&dMd$E5g{fL@X4Kn zHV=sG#%GV^^Uov!oq)**ir{)R>5I!EvAb~2CC-UI0@u29liW&IDF>x;vJ)6%5{_Jx?!VL6RVtZzgUc`iK=agVE zVEN$D=o5TgzLYS2_8LD!;{440r~ZK%;WyfT%h)#{F$Y1ShzFoy0RlhQIt;78?y8$s z-U@iXCJhkLK9jLkmmfPi$HSh+-!UK@(v$=vMjjGhT@AT{PkZ~@2oD@G`{*2le%ONA z(WCrxEtb*atz1j(HBCw)Vn!v0lj!t!v?CpTqlP<@0ON1Ekm1VeWGWj&2s+p+OANzJ zRA77jIaX0AU47v9wRbUr6AdeW-ONBkPIy!AG&RfHarRy9480ECp2y`kOqf7q9@f_y zczv;+Qy!ZL!$w32b2eJKw561>f@uBHYvv;#q}qIcps7Vn_3bZ+xC7Rp>PEB{mT3+Rg#PRcgwWR2cBNg)e zRlyIta5<~caB!N^J7M3V)|$+QX+&4j79%1nT~p^Zr=2cTSLJC+adH7ybCVfCw~D!Y zriKuu1A$k{YZ)Mz-_{~h1n*vX(-a*6=@IA-NT^Y0AD0S2qo*jU27&xRXdaCpE}kJ? 
zK1M!|^=dVnz95KNcQ1ZuiH;w9x|T^$@rl*_o|QokcHQ$rpAXn9Bs3|XU*Cho%7Kc8!_R-#TIs~W>jIdPd!v|R|M**h={05W9j;kpT6H(bgUN%hL4%}3az4%o zHYs&KgOl^)9@oD&%eA(kgM$EO9B^7)+^_%;3gf9iTGh~n^tOMCHJ*Dt^7sZQi$SLenYbJhbj)q3YX-t7ARu-BFMp=;w~jKiqoh_6cs@d%1Fr{OMDi~)i@G3~ z`audfJ1QhFD?afHHMh7wG8j>$4W9{8X;ZgN!peb=ETEycWWS0(;b=cv&LpSVSIYVR z_=@r0izbrDNdxT95o=*~4xWTWk?}||*pxVM>>r4u6C@~u4nEWh`gR7jQ_PF}?Y%U_ zR5fwG%Xv{a97yTq-HSGmd}L?ee|@b;J|o-pq$Dj9JUfgFfpDjm(>!P|7a0t~X2L3V zIN{GM0C*8Sf39HvfM5{I`i)K+lebZ829OvYs48fQ!!0DmYJO}?APw^~$$G6^0 zx1hob0^(f?D6C~P$1XUbaB@-3!X&2y8%)yM!Sa~peh!3!5m8~L3dT74Yt#ocpP#vl zOR=-l3fA7w=7D`Lj~HE=)fwyqvXA@v=0r@=2z?Q>ee!1))Wy$EV8lbrnmAk8`bRP% zBGpq&F-UaUtJxPVzn`{XE!n!^EQl*cShJ58M|LqDQ&zw3`{rk4a-*@ zG2|X$1EGqkZxm1mWb&Z0 zV@I;5gUAJ_7K6kMkPsk6i0gvB4r&UB4)?qYnlQ`JBLN8zhe#fOE;VfV-M6-Exqsce z#bU8sR?w=eO<{o0u*VLd>*wC$uQv=O{e;dX!9W1Sa97MOK?|45b4|-yTsdW94Hev0 zE0^@&c09!iI2@8pd>mgD%!Nc=7zAQVrql-i{sMXVM=bF=)`=C(K}7Tf`Tf_wt_#jG zOsQ4VvtY!`fHKQtKJMqd^E_Zex4tpN1=;$m7ytkQ^C9m%1~{Z|(4HhfutNwkU)g!P zUM?g@@Q1nw_xZRD@yReau1!bt7$edl-Zk3qDR#7_gh6Hxui^bgni z3@E{fGqo!HFH6W8U(~-V;_B#TAU4cqQ%oeZmZ|Y&zXT^+FI(3j@)R%_&4DZWeVGrp zgSSN?g1c0CvH$uO67T7D4@&m+_`$f_Q2%RjR+bFzd7;>d>_*#&sQ1bSE6#0o2|oxzd^X`S^0Ib_s;X^SVj7Kav{CSr> zQ4cAtk1u53000eXA@D#2xQV|1Q6JRB|K^j(|6k~Oxt!BKeZP0^XQ71&jO83&W0g+g zAO5R|a=|c;2?b^BJR*od(lJ42eJfDP4SP8O`EO<$xD97 zac9xWLWViw53_177yi7FD}g$z-rPo}|1W4>SLM@|DaZx!fSp=LC|3A`P`q87ax+2f z^J!kle6J5_WSP*H;H8>Y*dAZnCl#kg!Gno<*rSdxOT2F9<=;jWh}8BG(DJV#a6a`FB!UWW?7RBMkhK7QQ?^jG03i&k>+{1`o+4YFNB+oZ64^1l#RfhS}`#gD($@8~4MK@pxZbmDIxabDWQxFPVRO2g;{ z_!WveBmxctD9OQMBVrRl7}rTKcIIL5fy>lT`zxlx$vMIS;LyNKaNt9*+?CL^?j9<- zY12j!UK|jZLBH)+%=PA%(fP0MDaHHm%Ndpzskvs!?~C;2C{s~=XY1`nH)Z{aET$N? z-RDC;wtWceqoZ32i-kfB`%a;kBTNg+v~cMID*O}zz6I2+D2lMf>Qr6wyV+!rheSaL z2X4<=Fc|dTx-J#>{K)R8r|8vkp>$=U{Z{$D{x13laRNacCBDKt_y++cBd>m2*}-XA z)Pe@6p)=QIV2Bk34m&Bq=Bw+dqkDCip@AU<7!_6;8Fospw5{dkt<#xG&Vbu|i#Ou> ze#ieV*eeXi1VD_znqo>am}SceH}f6GNPUFU6x@jCh**uE_5@H$1=0*O_JN{umujMH z;Nx?(tjy8a>uOSpB;S@-m=a-7pY4@? zd#!Kg)o8eI$olo}z2*dW1Yr*WprmyK84>z5*ELHZZ>ohW;(~G% zPLJa^PP+=0#}%GutbZSua!D_2OQQ)4Yvyv3UpIq--e15PxCPJ)z$t;?2r-JL!p+=2 zNF_k1g8|g|yo@L;@on=3h(x0q@*xk0gTeN04n6_y@Y0Vg<%z!WLmRVPL?8knS%DC; zpi0Qt30`O%30Zz*2K5Mk0(oiAZ6V|3i&1g8>y=_;5_f+zcH&_3!W}dA>k;V@k9jd) zF*zGrE?AH!Qoc{8a!1=Neq$#4Kg5a=;dR$hY~8g)lQ4Cv2*JEyghh^_hZ4#(jBo542$&aCg|qHF$*HXAzJ$%-IluARRfQkbxi4 zRi)^`kToO)243z20H7%rAoWnox~~6;l*8YD@)BPKCMiV%B7{eVFIV;UG*E$|0J#_! zBN3M*BVH)1b~(I559Ai$eCaD`i&$^tRF&)kHUWMTP^v3Z#u*x$&wo%Ty6d~A5`z#2++4zLV0>T$S%0bXOiF3Z_4d}F_0OyArTb>S)m9Iq^1w>U9&Cpl29m1%}yzMFMnYYZ*Sz? 
z(Ev1I0)TPyNWIXU13z?f`@R6wVzcS>1gM<0m=EAFi9k*S zKt=%X6hgRkV}yV(oJO~&Ay5*=opI>))Sz`S*g~KBdUf~c3gEstaV#U}vago~9sFCu z>>_CY@DbGT=l09C19%>b6a$w@+PGMX^MpsJO1DI2K+ zxx>CB+-ghB=`%DwFSq6#Lz^Rf75P`~l{6HdBt9oIQ+%%S{Z=}KGMLLp%%Y0=y%>LX z;n`!&sF`88X75d3eval^sLfS!Tck%TLWZP{6-5Uva@yd0SX>;qb-?}Ju2Fs_8AU>+ zHJ!Cbe2s{Sz~MyrYHR|=U@L>ha6T8?GfbJbTC4zs;0RnIFsOOLqe71DM^oSy0HzWw zhi!mOrHDrnwLPL|A@X@;-i*B4+S~WZ$!i1 z^y?vCr-uisI&6KgN$4ud`geLk0S%@-P7@yFp|YMOn49ym3Affx&c?w(S=c?8K=}qT zmVC636VNS($_rQ$MTMz_IjPhfR-Ym^$YAH<2fcqrs$vpxh?KrU<%Uh;E%*4NFZ>7~ zF+-tGefRtfBg)=08u2xVyW2!-^XtnX>i(r#m6;Sy%pdg%iI6H#J0;h~L zYs3dgK?DF*AP`VlvBCgvUkoYe?2s+c4rajSgHIAX4YudaU_kawwuA*+sFED8&MwPQ z=-A%-z$bB7Jo)+7YcO<5D~mKXcF)MWa3Ax)nW%f}6D?<-ZI=#mT2L#7N=hiOs4pla z2#`nvi?S)Q{L#X0ltMO6Qtn_(!8#aYkOUZ$;fmY0n2FPB!q-)6@5(BW{XZz4^ec!Y zgtU-7mFyD9EV2=pYv}gMA?*hH#x5^KOZNJoJ6})B{xE@wUD!5q_%ftWk${XDyDbY> zgYKVZ0F$H$wl8H%5iub6WrOUt_~J$+`&QSY#$$Ym)L6GSf$>5!Z3&%zl6u|qO4i$J{@XH(|9&G^aK+$$G$mR*VVEb^f zSsmYDFz`M>5g+aYSe{HAFY?DuR+}1g<`5yE96@pJ#wKF!xrhE^?FVJndnvSV?<@n# z`5Bli5F3^tTKva+j>Xyq7tL+*)r16?B6wXCN-)TUlw{7Az{gH~BMv2BD=RO`_{N5z z8KF9i$P}=nJ1OV+-#?Z7M3)kTM59gnh4NHE^m#~&2OVhKMFUCsLFoJU5w&&?`_k_t z5uJ$b5Cisg^m(~|EJ$R940n7Ev3p(GADjrQ)rM(4}ioL7Qxwos$tDsSvE`h%FV|7{OMG%(F0rS z^nAX+_#KiVhqgVC1iL!Cu$2d6K#gaE`T2Yjz>5N_0#p z958@`1*1$YpHc`G6G=)OOnw-rchDsV;ZQ~~>crUZ`*hG0jI4cBvXeIkX8eE~@mM&Q zF2@Dje_G*qz3;wgnfg+d@FW~booV{xl{jDxx_RYi zN3VdGw#Msl#7RegzyJUZuOab71-OWU_PTX3|5F#&=A!a?*ZLl=XC-g?>w39;`O8vm zfBvhOwtk?P+&n8UVQKOhHbgF1Q^rc4Rzmcqk<(luHhUh!NBEwK|78Sgx2p1f1}bY4 zitNoFKh6JT7YBAAYuy&TnAKX!?Ezc4bRNo)CR$&AuD!w>73+h)IH3EX3{2 zkH2KmLJ&wg2R_wHP#b!1{^`q1*^kUb#tzn-<{$s?ln>^P@96U(9reGHM`yR}MFT+2 zN`pYqGm?0+v4K9p>^{(w4jw9l&l?AJl`h;aCk|h6hrt5~i$r_$H7EKE7~~j@7ngNK z9NzoY)!ygwzvEC~i40H~x02{bhJn+Dl@`98n}puM$Y;``mEux2`J(0|IMMDPuzup~ z8wP8Z4wk^+jTdIk8Z(k+!b~OM9gaJARK-n%p^BXYCC7O@$Q2=8s}wOzQ&qog6W~4usdxQazs0AR<&srOrb@8 z|5$w^2|rj40#bl7r2`uot-e?Q6w;&E z^1yPmtkMQ=DnY<}3k6XQT+!(Z+3serNSZf%Uq812-$T2ly)=uAef+eM42)xnj+czb zm$`juKd?CDuh=0t(LQ?p-b4m%ck??9s0>2}j3R|HWztR+(BddOVkp zwJp<$o57rUgi!fk(+Lnu5^#ZEM4XIr=DIIl6%yIIYv?HbYpcR49DT6zVSBmN!D_0f0 zxF$o=s;%1)<%igo5)BSlc}z&Q@7ioZ`YW;aCKe0=pXqUNAA7!Ug}#u53j+hyG7cnK z-+{qpOxU<9VIfP~{>3hV()iK}&@>N?#C*m1n>KtyZ!pG20r2Q<)W5;hz#VG!C=Q9) zGXvXQ*terDc=3muB`*@<7<=0%(MVE5K}%d*$c@mQ?MQ3e7iKinVXB6s@<9BP2{4z7pkyHC zB2p+F1}YE5&^CoZuyWml@Q%eg<+0YIxgxI6?AsJOOryQSFNbr$ zHK~X29Fg`ab?f@*=L<&bs>y4+W9|pPH5WLFU$#AoZpV-qoZ_=0ut26U2NGvYu~&4J z90Ri<)kPN_07eQ`{>sIV9R$v1V7c^FU}~4FeP;jqaHnFhd-d%=jD*OA4hBY0pwPAy zff$1N1~k#MmjM)w|CfI>I05#kk{A+<#{j4C{sulc&VsW{w`+fuB!ZzZupu55AW(QADFVQV3hkMHI>LTh8m{qS}=%UN|`E;l5vLf&f^3L?|L9QxhR6lWtl{c_Gjln|n zX#@co#9`z*A%3v21)$65b@qN+vK)(5D+Kxn$fXb1%jFpX+;QXpStDawr|{~eI>(iQ zFyme6;?x;rycoU&OEOmL`s1LQeBtQ39$i#j=&dTW1-7idI~`8|<7o(I2Es(n4l0eD zY+fTl(0eeMsM5V3gd)aPvS*cu8d6e5fuL*{?sD3fo{;h2s1yw%2Etw&2^^L2%Igpv zt*^J{A9$s_TPH4f0@Ruh@0_vlI>fOnX%ll53|+EUz_< ze9-YWZT)@nFXI5Mf@>UrbN~u_T8S>+3lH1NPBcn02@Qf!VsV3nR@e+~r{$I*@g24P>hieq-2ITN{cs?W3*gs-lG}9oENF=XK z{#Nq;35Ui9IOHT)y}P07?ZdEvw|4e*$5Fd3!TfQ{bZE>=4IBRv%S34C{_jVYy+ee- z0uNcA67nfVwS}vLGir~aG5BbGg$=tDdH;i{kALuLEem$YIDnRjf&%;WN8~~UmnJ%NIuP#4X(lj6%m60VX<_b_vNtKcAiTE3kV1y zA`)fz3%~ub-#dyp^?yl&{8`vDlG(HG5`sSL&pB|W>5g3kmYOF2AGpIY?*|GiS=(_# zbayw7A1K#)LCju<6OW} zOsxNBS}1}bXdXgHA6a1{QTqnL_~eZqkNO8IwjN>>fRqklCVn3l*Px)Xrsk}J_4s0> zU(4Rj;z%5Ds)8dGuboFv)9^$W1@0@d@9|TQx#`e21n5{L()XuZlQFyCYCZTEjM>t3;`qz z0AB?caab5$ux+5U*&i~@T0(E$J#``{1BsHs#Ikn|NZL3dZU=FAzFBnyXhca^k&sMQ zFPWLR0df`M?LBoZG6Llr28g~$CBVIAe5V|SPoJsR(z0=mn3M%vG$ zAQ6CK1|~D~J&n-UeJJHKqgGN0te1|Dd02vuM}8Ne^4gMbhm(m|RR#adp5j~n(*DF= 
zc(M(qISB*eRu9Bc$NV_NsL{WOuxuMP3Lj;^j;Cd*mj)>YdP{>uLQwvMXe{o&SK0_O zABubd`zOyv6u+b-I*XuySQ9bU16jVX3JCJ1!V2hEk2#-$vk!&pspG-|1Ox+DC;~^o zQ|47fV;U1#=o@J;XWCn^01yPC5DJg{2P8!-85P+j{oFH%JNx zrgkvN-}0vIr&?W>&&1fU?^Ub2ob{P7wv*Mhhw zU@0Rk_j;Q5@@N@Tz8edKD3?6;Ug;ukiUX$t5_ zfI6DVZLSUvY+r-_i8Lo#m+G1cvxCpj=zl|@{JVk>AqN2va0Do1p#YdsxD{_Z$7d#Y z?AqJFan_NlftyS?R9t7iHf7wvXOi+p4iJ8CdcoDieubC}DW{rj6OY$J?C?5+5z77U z=3xEIVgR-Z9=&hMQY`}IR)=zd&9a;$8W|bT4%1&Ec%204000b0A@WEDIEws$jHthI zcl*E8u-{Aj24ZXZZX*}P3wQ(DgxP(gdpsI!M{_*`Z?7-0e+R4TuAGrat3KAvbHe?M z_q!?58+R4``XjqUJ|6n#HYcorlfh;JL*?8-;v9`I{O$M+?KwX!3FkAfh)Zj$g%DwO z{caoZC$8VsUuD#kANSu~@OrqxWPpYApP&A6@Pv8j(=^4;i>M%s%M2>|59zz_Wrw2| zZ8t=ng!nfMZdJ~0vC*%0kj!H`oePDPlSdn!S&5vvkN3tPNJ;s7bC(VvI3lZ469hN{qu;;N z${_mP1{XICE2-6joZ|*ddip~Hb`bY|I}!MW#Q25ngg5R|O(Zxb=-5W_gv2Pm8#bsQ z7cgqDJuht=+u1o@vqgr`H|bbgg!AXU<=|^*G`ZfuMi2`o5x&pmPREJ@paxLll){ch z3I@O)FY>#Fp{JmA5J(j?omL41-Od^f$NIHZz3L$`0O7F~Yb&`JrM4~+7!*%74~QXc zWAC$Q2Mm9y!C(zg-r?gaSXd16T-+IxH6Y<|3BOy8-?St^U@|brNf4IR!wk`g(BQ>J zZZ=up7D~T0Df>%Ru1#Nop5dP1;?k$itW#?3=;s;9@Ci50u zo}oWtjKlx$p&_x)L=J|>mcVvLEZRJk^}?Cyrk!*Vqy;F_~wkJHqU&n_b*5%I3^CLGiI&m zsKHe@hcPc3Uq%a$3JZ*Q>7s(!2b*LjXcfoEJ+?#t&Z;y7@dbvcHVy+o@8e5N&NGM} zR4y;e;zfxXI8cDVEjNkx`>2sEM0{P6<=+DHIfjP9;nRC5f5ienmAEOBIpBQZh-@f6 z|3M(4tpw1QXnmXff35qzlnlyR9M%)Y~Rkib}KEvN^LPxC@7zP0W92QIN zCGX1WJS~QzgtCN?L~DJXYs;Ks1!ZsVdat&5$O-m8gRA!u>G56!K(Htc;6&$_Ok^B! zGX6Y-9>4S&RQohB@A$5sre~LT6cXTHAQwU+F%62S8ZZB#|Nc%kBu##_dBP$MUHymz zC9;a(J(*OyImH)5QnnvGmKfd^F$U_5{vXhK21!dOX!*bY;Ew_5id*&}2D!e!`VZx& zFPx)0h8B)p{%9SJGsgYsy>jDrf)%oE6rZz~jff2}Y#IiEuxuIzf!VlD zkMujl$RPCiaN`88RTow2)n>Avjef)})K3T6`D&{+;Xwf-YSDFlxJVE(l0?s(+LA_2 zo;mXm2o2CY0vEiIZCPSnP-y+Cz1u-9u<#J-*+oY47nCgTK9<&PRnCJyvoAtU(bOTg7=fmT6} zfYC~)cxtJHU~ewdhK-~DRU3<0Q(7+(=?yh z6xc*)z?6|&P4lUn;jri|2dtUmxqp{Y$ZluDq>?Yx{5UGM>TyFKZj~Z}`0*N6S}M2}^bJi>GR#IkxkE1!vfBpqwfscW&P@ zB`AY7M9qZ7lP?q)HH>+D2u_%6qYCMQQMt=-bDjum@6aGrgmIy=SzOnXSsqDp7X*&wnf7A0=6t0kCfm0~gj%fKrUO zrrw*>xCvO17x(TfHVM_M;e+&yu=n|INtJR@p>Ao8@xb*5+CQ5=C(;e% zw)U9ulsNi-;|LXJQy>dhp%4JJ&scx}B@v)_0gkW|fYp>N00>q>Q-_fHh@!-Ah&y4NB5605kr?-E%$?_XO76+aoGVAt%0{~+G?XK^^ z?5^aWzgK|ma>Ul8t3ozWRn?_-{4=$$o!LOedqqd{`i#b(Ri8auxvfB}da)*<#pheM z_;Enj<-f1`LsE1~DLyC(;H5=}Lzl2Z!yuK2i!~^3PHGuZ+1apGMpdrI0D1ctOEUe4#g+&9P zNGxnajKUMq^? zxqXUZQ6j|tMlY=raEM8%d%qBW1|c7y+K{*SHYgf^iUxopf($s)gvD9gx6Bw20p2SB za8A^X6aWGNd^Qt;0|QRDumCnbP}FPsa}<xI?@;Z%_km{|z`XeH4%@3-yB>_a>wO z=zGV27(A#*s`VG3U?Lqy^ZlSB2aGs1xNXyTrvdl=egp(d{{*41W-Tf9XC=9C5TC34 zfA#m*+X4&%USRb0Hcb;J=lz>Oem%RR`@hVHfM>c<1O?#0jag}H+MYmGvt}HCTBd4h zuK+kfdeFefJsHEPfP?<;D<2%7fpu|x6U@MtZ6o$23?Kso@H}QJU<@uPMH1cH`kGe3 zk+?*s9*K)WqA@5g2b>V@3MLc?C-B(T&UMXOm?DCd!|X}K|NGN7ni3p8j0|xCfm7|A zg2BSew~L4n38{VFEC2uvjv@0*1~`eop)v83I-}S6tC&pCu>L~w7IlS$>54f6o@y?i z=X#UV_g~Jc;sn<=7Y;z_bC1CwM}&eR>sGjN>xM>(_raiZ&n%9L-<=j~Mr0!g?bMBm zzJdiRE5b%lXU<6*>$O#$&FG*%>_3b6=b?r;fr_zDldN+u3T~Dpp0;R}St?@ZPK0`J zN(9ke7CV%A`%z+r`}%Z>I|13>ya55QdzMVapdMy094F)U&c zVG!Lt13>!%`$r!PGeGAeS`4uU56GH_qyKuAIKnxQ%9?l+=7)nX78I1>6D0?~q^)OX zw;n)<`l{DWu%BpLJQ<4+F?pz<+>2brl7nt(d%oPK%AKn+q@~1))?;v>oM2g#RZz4n zaP2*VF(Qy)H#6w-5C8TO<6h}7c#Yvl5;($PY%l+bLM$AlP&tVpa2;4p2(;5SZCSIH z>;%5W!3cRqhXN*4aAI9?W6cPD<7bf~PPq94z336#@`M6hRuUWPcRz9h)#! 
zS%wh*vSNaPPp=9qk#@QWX|Un>-(N3bfG8EPPAp-<;HoYGr(K!slvI@%pGB)4 zT2qfHPc;y*ioUB$e8)so6_`?%?}wXRw*UA6VlUUSmJtE^4b?AycTAp+*oq z6u{pIL~TwDJq{gPY&$LkdqeOJ#zTR`97;#1ISrCdUzvjs5Q^+C&01voY{NXt6>HG%zCN1=SaVsU`+E^RU>=01I^kWncld+NAz` zxbpt^>G}kW5Ikl$#VBpQEy&H+bN|nve5Z3VslRBq2-b?YL=LH&TOp zrS?})h_*s7%<-oP9jeP%e;XYrx}zrEj}lQnyDf_>U`Fr1%a2mXA+T&4Hf$RQ(89+E zuxuGg7Y4zz77YW$FqS$!y9UAj)jB=kLIN1ag8^zw3)XL1#+6&>^bdlOOh=v`Eh$yy z`Ff0c9=0?D(C~CfQ(B9Qi-yC*$%aBhSqIk}4Ynw2Ko0d-uvW24Y*OuycE*G!u=0G!mIsiy=BEx$Z(2GiB#K7V^mb8 zk3=d9-Ux!+1|(jLw6+gDGn8F}_6>ueelMVQ4TC_?KM8g|fuM5rgb9HB0XKpK+@HoSh)<}Y?hu-4Tjs{eogx0h@Rpg+JRU=#r`6u=sw z39tn01)zbcdvD__YtM27zm_*dzYoyf>&BR52r1n)Q6N~Es$UNbEW+FNX37;_uGUDb zX)_&}>{P9n&^}&xzQM422k6|mmOd}dnX{=$BPRZlN{4(#^Fxe25^PjyG4OLS4Fzwg z9lBXoCQ>yF%I$+;!q}LwxTJE4K>;gTF*~5Kup?sbeY^r{AJG%rZkt~8li~3#$mYlF z__i7nU{L*aV_x}uSRfZ3dmoxu;9G-7k&gpMlQCd&#T87mPoea%oJ~2Texd_`icMqv z&9B4zVzV>5!PU^bK2*7zJUU?_A$^*~?V;iU0=k0xz2S1q{R!tp z=i^vbd^a=iD|UFGnkZ=I7Z@;LEFd9md*X@W4=q6;yMDb3R;Cx&Oydp;V_{HV-kZ zd~T24!RmG+K==>Xi5P*D4`C#IvRMYHDci~*n+J?}lz`wlhxtS7NnCxaFFtkVyvaae zu~NbphFb=(uyVJfNxJ4b-eMGJZHvN|k{=pliV`er>L6p(G7-3G=JKdB5{biLn}avJP%Iy^m$z1Zfsh)+S5`+c=Uw`D~r%v-O&)>@$a7!UxsfE$bu z2f;98YUPQDI+mqDS(e{7Ronwn=V`-fcI3uQ)))NXG*k9=)#xb*fFaN9eBN+oc6)fm#Rh0adr;Pp3QB+Y!r5y zHgL7D$(xBlKrXwsA_LmfD$_QFJ>cvtKCB*pn_i^BV2}uu`n~C}8NA^6@#hj^8ff^b zF_m%yHp&w_3n4JzzM;?WFt|UlLkK{KgE~Kh(=2?e%}8lv4@S>4lXnvP62Zc}_ijHV zCF~!M8XI_6!9bw}Xj-2Y7N=jUWMph|kPh4BS#=^!i4##L?j`hPeH&YEUtJ6$27$Lm zkJ~NgCG0~jqtTZ3Uhxy6DZ}Eee7SK(7j7p1D-&(@gajlSE+>`Hh{uWI0)PP@5rB^* z+&@{PtHeiYx*pA)&z;9Tc+teDTKynCGxB1=;%6j>2 zJT?uWurwe+L}Ue$;0D$k8juoqi~q!ENTr`#XmZ0dV>^Yi(~~fQde`QjPCxg{cb_RP zL@NV*y0Ju9qe+huACdzV%U{507>-#jO@+G1K^iuSM=28DPoQWTm+*+^Z#5Tbu3!i| z;eVt&6=`YZAowsjt?{2G#ov6&8xoD7?=Zg^OxM0#F-*T6h_Ai#Rs$@*LN51z-?kha zE;3j>!m8!F-sTU(*9K%U0LR;T?;?)XI?z?*FK*iefdLE{ytwS+<$PeoB_O*oh9+RX zxIQy2#QVAWS7WiFpXpl?<<|4#a^$k-UcUTB&v^xd{CB0uN|%-jiJ*E1*gd+HFn>iM z;DBLBRrFwshhRab`|EsS^mfe1JI~nM{@%zs;I7XZPi#f*>yr0`N!+xCHG&Vl%`*}1Ed6@acFZ9+VSX6jPbh|{jd^Tr1I|K#>$1fOaJCfv8Bo_k% zVJC8cTIpZwCdtAUko;Z*E=9+{$pQWR+wYHOFLSt76|;iwxXF|Ru*(}Cwrst=09i>) z1PCZqi8S<>7y}RIre8^iA*A?9rSHHB%mXE71p+gBJ&Ftx4@Ix}Y2+xpe|UoT{sNjIrp^(OkmRbi`R;AO$~W>2|_^m1A1&AwL@C#%J`_zn9!&}3=aahSsE3Y zvC)#sJO&oi!2k(G%kTDI{J{X8-0*MUR0%(SrmhA6m?ZEQG6i*h(`Tf*O{R7MJ(1D+B-v zP6IGRdZGYTbEJs$J3ebTyZ`7BN4q3q_lbB2^`I9ZMgw~%qnA)-!2~4_1VG%vQ~zFD zSU)&16wY8;8Wb7`!Mz6w3Sn3u>5Bfw*+vLA%F$XE3*6=RL(B)DLOl4PFn!iw{p)I) z=Ko3$k&{0X_?-XR&-}fJWkQ5_K)q{TxYjD;sfR7!m%y5&L;^?~!Ci?^HRd<=g=1@p z+b+~Y37|Ku000QUA@ooNxQV>-4{Px?%VOf`^zZ#tRX(3F>p#g)xTvkKnv@PA9o|p% zUN%l_PYvc8Q9t`)9sjNr4)KTLi`uOHWtr3<>`#B{wl1+Lfg#UHCr3xt0VNKIbs%iiAJqTMib7AOv zNKl(?`cxPG0tO+ZlT9RCY55WC9L5K8rFyB{H zdO|jtq%0w5)Dxgl?0$0~9lW{&ezseqdBW`HlzL^DUtGVBN6(axL*!(33+KP(7T1$K zfjKC;2d>ht67<+M50$S_u=51ya7-jiTw)?)Yla4bR@pNc{d&tS_}EQQm_>lbzs+`8 zV11^=-;_X~CzQrB|2v2lCF^w>DaCET?IK08KplBH>4^E5xy_=H!`M>e| z1RQad2NC8n$pte!CVA8ZfSO<-V{=qSFw zhD8!qKAzD=(E3#J_=k{!N+$NU`krN4zP2vTjTx`J-;RNW8t;A%74pUoERGF!SnOqE z&J(ZiRw%)(hWb{yexEHhmM#-8RL+>vbc>TJOMZHFzt_x!CE<_3Ia6>Bxj!zyIILwe zYcZh_o&5{rtyZ<%NW{A@VAwmX+}PKfplBaK0(KWb`UmAMi`Hmx|LjEzj%FZ@tp8hl zswH=24ATp*1!6(#)GFvlxq$zFkDc8Epd@Dw*1nTwZg9c&=x=JjJieDDBuPm!`IpO1 z>cZSefxv-95T(G^3+OvwQ_`8==xZnP@VF~qIT~K)henb4f9-J&IsV+nFL6xv25o@q zMgBkWL&4?K@Nq6*+zLftFDJGK2 zNdxr}K+rw|AunHID8?H44*FFPW(|X*IU|yQ`G@!)>`C0@WFtOr^ImMkkp9A4GT3=@ zJVPE@=kvBfh4c-M!MujtR0ARzgAXgL3D3O}5FP>txdY!~R{;z2@2HZE_3uU@+sgDC zC|>3u$bsYK4=5#k4WjrzRE8-OD5hlqDZfKPA_I!wOQB=6PJOnb*G1Q+QG0dwqM-|- zu{*!fUGU`VF@(a<2OsSn_ua=#6t=JBEEbFhcdTEJ%U*?@_8QyKS@il$!^52pm_FYv z*dYOGC*|a89Z$))=Py%FfuO+j2}TnOGY$w% 
z8Y6P5cnL7OC?8G|7)v0Y4@UP|aQq-5Rv&E#@rN$`x24Se3nB5Ewy?5xjq4T>IgC-q#W$A`P*e1*T-|z{$&q{HUzG zhXQTxIsZrA;)cU&QnCkvp-R1hDLPn*OJqp0{~;k~F#rFAqb#}&gS5=_=$_9{sf1WP zTlhpu2f^_o649gPb{LYTDN~=5b1B+K`~Hbpx8L5HV#|E;)CGGa7V(Y zu$B%0MdQ$zBmYuBU%{Kc9E2Hw`d-XK9-JLs{I^~WxH9AK?eYc%ELAOg~)(!_&5^8W6zFet+}>-W7wVy54g{G>5OFh&NUSo%z7iQAUo``HNNlolw2 zHMZU1fUZ_0y|x8Uu+k|_2i~PKqcwhyQEqZX+*md6n(``;wfH^~1SCi(^L5Os%*NdS zE(Y+5i+;a=>sL$TdxBVVYiGnT^bO!*V#Z8H1Lm@oWJeb$)A&yK{7M3Z9P;=PsstD- z<_ipfICgoLit%_HStcLo;Bjp*tKZu=sPUdgDJvhRFYlEQT3;6O?i~i>!)68nAe9Nb zhSc9oFVEv4B3a%miv9QcI}7^L9G~u+F&5wXK}}}f0ENXihM%Hr*%R1_^T>#h z9GRDMxDIMizWKoFIy}DlZimcVT!X|#7Q@Rc8{*>_L>ReSL7&5vXb+`C*)dmA&u@qXiF{(^nKk1}7I!Hj}YkS@-{>Ew3p zGVAU23L0Vj0ngqCevQ)H;PAj`KcP!k5^^n0afN{d2POLqR7OwrUvdwBH=vEiZ^72{ z5ss!5obk(lO==4#M8N_3NgRfzSKl_4sBL957UkdGFSNX1i{(R<$52YCC1DlzN{Zq5 zL{0XbI)?)}w{#5D^&gC&KfTXP#B61%RMrcxo!$T2`FekUTNFMCGl9B8(3~w~oX=UO zX40qNrXQw-4~Y->5&pT`U{~-Pe4F+j*?eEL>?k5^RmglxnhY?<0Z!^^N0#xGts}K9 z%~#S09%Eq3?|a`hcUAOCVMY?(q~oA={AEu9BJGabIcDPrR9%Ti{KxG&<{$s}2LCXN z>1^J?-?iqBPU5$5uSP4DlK5be-v6Xa4L^oaWl)yaL3dEuD>(v#5z?;*|yUBwnqlup#ksp@r@lCwvL(&VKMxRBoa<->M8K&LA@zWwH4LSUZg^^wE{J zFq?nOAVy~flZo7XW9)A7u2vX|EhmwW-^wwv|_^K>Sg3 z{l9U72+CpiQf~WI+sv4C@D%i~x|PaSeIsd1B~Z6M^J>Qu;St*hW`XD$2cGkB0ey=Y zAlNm(mMDCDs^|L_8OVD9S$$le?4CAdSOtzOTj{gYcytv>p zo81V1TZSujU||?zIaT%CWyHqawHGtiVY+2&{Q?E0!hnPa|2k;}6BWxrmVnyD?MPY{ zmZhPDuHWGWwP_zlj|*`xdH6cB0CoSz|MKDS9pS~zN&*+R_sApX-!Dqf>BMsgv&8?QD)_6rySvQrC0>RY z%W)lgWyXv_;cm?6TA=LwnsVbr<>x;yExCDe^8Nq-2+JY%SOz$6{SQ$&{%!FPw}dnQ zYOy?!&?(dv#|ypfd@vW|JYtEjAps)i>~NAR2gqh1tZd)Ps;EuQeVACHo4#G$163Bn z%(XGW9Q?fb+ll;Z)KL(T9jeEw^5y*H5G4a4CJeL~uyIgqAG!zOOqSDIO@H+*J%jvo z#7sw!5C)HBuuEe`DoFTZne)E?7GX{q-fKb4xO-R$Bm5F%1)K;`|3KZF0$iWEx z_HJmIDzSGoPs(Vp`?Y?SwP0s%!LWS+_70&Crdl|1Ex&32R*Rk=APV4CN6dgWu4vMISt>6TFz3>9jdPXRMw&J7K9fuBa_%U=+8SB zVAwO#nTRYO@mm!SVQ&TijKddMSHu|cQE`V3>FUzN4szsH${=#Z99WQv#<3*l5#`Cn zhA=NQATmI}n_zB~S?qkK&E_4viWI^3a7C$UIJm?>0~;F1dI5PtA46zhF#WH^?QMTI zX3g^QT_DgS!eY8UAUWAaGK){y`CZvY$CRSRl&E02`h5syB{@}BOIt9s#5$js_=6MM zyeL3`hB#Hr^Y_@O9g2sX~g~yt+b+gyR zvx(*?qQ7M)u>J~bF`BLc%THLJbZ+Ti5ukbof$M+E<5PVaLC=R(Swwa8?x7)*GX}K> zGczgT$EQwK|F*DHRw`C{vWjQFL7_rB1)&=&)Lwv0FcWOFy%~-}(g&l_uxS%LKYIni z6AB0vd4FG%_CbBWTEq$$jO-JXcXX!Y4gW!E5DhQ2H<-$nDHTVDheZL*7!5Jlf#vxn zCQXSS1SZ9SB{UIIwKlgW^TO;9)qqZd0=K zVEXZqbPin6=+Wb1_^2^Zdk2|~kyM2g*H9wfE0ypGgDHHtb=$^>-{qvO_e`QAND~x4 zK<4j%A4!Tw;q3^!6jUsrA{vKdb9GA# z*%j^ICI2CK!Gd9tsLHzbiQARj&`pR4&IpJkNa?!fX2-;9_EQhcLG_4TI?nfmE5WWh{)7L|3p{w!l^3r`imT54(#@9^8x`{;7nW=5!wF{B@Ld*{s6+SnPVBdn6Ug#xB0Cfl&FN_pCXCC;mdTpSXdeOf9+}5d}iC{@vfK zuIJ~RdBRG}rsiU|^p&A2mHa{b2fO=QmXkC9J+-yU0O9jhI(sQ^%%%bU%+~t;6}X%m zP;~3ZRyBiH!06@7IyAX+f5)(a2X__03yW?Doz(G-12ukBH>=PI`8*xGwcIC8<7A9t z^(~hBzAG;+itj^b`3!ALlN2VDW!X)R#T~E)iYImC!_5kB8beM8Q9d62`D+kHP&)u7 zaU}snI3jw$I0Jk)j@SkR6*zXt^$b9;1CBw%+fUGb#E9FT9)q^D3jd;sK>m?Z2mD*L z_I_T>q`Y57`dOi{W4`Eug05kBaZK{B5{-yw7!Vf4371GJpV{eZAwqT;Se2Psvco|V zldP|c%@p(z-8hv*NT{!%(0FlK82xKsdP9oSS#Ruqgfmmwke9fES|KJ9DHIKaq19zu z2bJ7Cj|Cti(6R&rp#h>mjnprjX(wto_&yz71=O;irrKk$+xm5Jc9dt~Hle6Bp;jk> z+?Umyso9eC9Z8 zlQADpF6C!!qa-;n*!StT(})~b-F@C5kUi}fy{wO@{l!7-A3*xrW)%j{m%{+eJOnWK zGz5T{5{UwUHQ)tY+?^JyBK)kN`D_aSsh|G8R?Xo+(ijjSJd5B6ct};83+Xyx!gatI z2z6t;l*DA`>&xasAuF|CToj@@*r^4}73_2`2l#;)py+I@Bko3h`Fq7hR7@kLJ9vHa zKf&AV^4d!W*&Ac|!eKK;P9FoNz2zVwFM8pj0^Ni6;FL*yfVmZ1JQz2XErK9U92QzE zxp8Xy8B&w>gmr!5BP)!*YFdIWu&b?#e z+WyR14@2__oAUNnNH<<>eW00__wnbU+@7-La>{Nmu%2W@K%xNXsF%au?PU7CzYQ3A zdmX*|Rff~=^i^mi83#eoMeZ@|z0N?`))7FI6asbII-l8MU4X0y4q@ za328xPQoj3zY6Y*cl-DBGAtRBJDlNszy<&KS^))7xqi4Z&%*)lU-Mmfv||eePX;M5 
zb2CGS9-A2f&N3z(EdAin+Bq@CWxv8A48~>-AU(|g?ei@{W$=N-9{hy52NQ{2zyJUW z;UV{21-Nhh4_BT&6ky1|HfA%eAm5>`pWdoMSNg2{n7;`4XO1r%ScsyVWyDP&@c4|sfyyCyMIVdRNwT=GOPf#I|7tf^(A2AMmU$@E{Y$BP(S7ighAFHp># zS)BiGlXY|>qer7gRs=tnpjv@^DmV&2usjnU(U#9g(AX8SE!c(zMB#7-wi7?DsQ_*n zW*=#oim^6^cuLfRs?S~Wolgg{oMWgTb^V#7QU@C|vh1SM0xz(E=M@Cw%C;`^i>)m7 zJ4=yXRl5(23$WPaEyT|Guf#rGQc1C#N&()LWw$;wubu85Q@{G*uZraHsz!*uxuW|*`$P0&}Q?oOHgxRcl-6=J#B6}UR|+Zlqd;` zhKK~Au_MMBD>RShIJxU8BV|gyOXdqozz7BzHZP&4RhBrE;D*!M%Ovl>))m8RC zilO!_xibp_h*}c$6N*foA!;kS-R^ky^ALP7|F@Ff-PdBMXv+I4b?fCq318BpxA*}W zfIk409x)0i7X>iS%T95!A8T%faWU^S|8RCvN8p;k@cnm;H~ZSqh2SD0H{Jgcci;3P zLPRB_NA;k9a|J{QS2W#@T8&FmgW4sc{k=MyKylS>2Wo3Q7bbVoOJ~++f81fxR+GKI zPXFHGvM~QHTQn*s)r~FXyFi&TohAe!BnXNMrZLlFee4oPVE4-IJV>@=E+nE#Bu^|) zo><|N@X$+vDkUItSuNz9Pk6V)JiXo1LkK8$9jB)<840BLm!O|D>t2|F?TdFEkIVSN zAtpgjOhbYe3m6R5d!PTO9)1O`fak^T$&$X=mh8-=6df>=kRS|&2e7!j51+6>Z%*vd*1^dV?#p)uB2k9Mp@Ps;&|qR#ioZBS7>(ZsGWyJM|FgI? z4oDQ_7>7~)-Fpdv?E$A8t1a!3|eoL*wL9z1Kk;ky?;tCGP zw*P)w!ElTjlaLi}7FpYBmGvSRK2h$n!;4{-r>b6XN{M^a#}9&>>1{yvvtf#$l|PAr zUr@tA59T{NWv|}*9B&;2AC8_altAnK0b^z8*gF{9y`y8|$ba+>)R-qHDY)(=ilxe~ zqX2#sSWE&AEI_zW_W>*2e!t~2-;U-nYg?nxwXAp4H_%@wmxj)kX~UXiKPUU;AV)C6 ze6lGmv^jTXJ4Y?Bg(wRcwpa`bJ0n7aB}Ry#(f|r)k1|)`?ZpbllRQ^56SH|=r$1IM z-L?KN!PS^GnSFmrG!r68cKKbxqoWxiVoWSLP&7fG;g8jeSwH`EN$SHwoEIQ)H%-Yu z%}7Ynyh($Zgth)SWprp!Z+n+ab5`LbF5c+wT6GZuRl+bPK`Wrc$?4R~KTGfNUidp_ zG*&-R9ZkJY$~#9x0pF8%&B&NQgpKi`gt*68{wJ^8P2z4{AbujBN=hETv>F3JDa zT|iC%X~w~5ph!?t3sZ!-qDsB{jnW@qEvJMTj5)XqWEEpJ>}8l{eV`m419#L>);-n1 z2!@JtS_qViaXO?kpCG+E%+6*C zf0cLTGw2dX9-J&qgOMrLXbCX3rh>_Fo>^v!im7NAInn1VGR|1N1scmd`Jl z6s91yG*3$Z;lM7H|ZW`rWMdW0a|h?6}*f*K|)7MUv2F!XN4Ofbh4 z4q>J*-7q?xHSqbEKcm!VmHA4@r4&)e4jI)yp1qIk60=4?a9O7caKz0Fi&_e;m1|)ma68p=Q_og7wd`i%;dRbRYWg0P1Y!e0!em> z4Ie5^jAIT(vW5p3FA_9M7t=}%lx^y%iaQmr=b`H`D$HC`flUoA%UG)E>6h*0x)Kiv z5)vGZ5Isgk&4TkFcqs@VC`R(TRmIIc%}T88mnRH3rp4b`dC|48eY&e(5{I z0qvQG{ESOkJhPS>%Xf?o6r$`lQR6S&+rV(MzjnX}cc?c_QO%}?h=~MxD!X%N1HRC$ zw}r?qhj&&X*PAu4=wuv4u|RP^X_UmIz6lHUR|+F=z~%G@P!|J72kUiUN-#7KIACa92F@{W!=e`KwgYT%y$9D&h#VIcgvokP zSaJj+)i!uOoWj~#3lKvpv4N~$b zF+g!gM;$Z80g3|@Vu0=6TC5?)`|HhudFrhS78WQ+udUy=Lqa&50B-R#{KPlv^7}g| zb)t%ZgQR1{A1_41v%N{9q3sTd1X}&8gJBddqMwJ27Q?uBReB$n^M_QRT{JyRjQWQ& z(FZ>HYlC5tqS1i}GYE|maDDREo6YaoOccU`2_nWub@Hxp#fDBIDkSJVyT1zHI??Mr zfBdUXQ{f;;1W+-6AaD;jEAM-7mlqhW@{G@DKwtm>2tOhCUG&8fZ z$It|sGx%@!7YAW$2V^2{AwDcjlcBQ#q1W>Zj6XC zr;&sN_6ChVnpz>50wvI|T+#F<|9&A|b9=-s zmh1>YW06DNMZj9N_LlQ1Tw@9U|$Uj<6* zZhh~>6{i29_GoH|6sC-kq15Ta&;Wghu8;A560j+UwwP^pBSk1Q0|14O4W%+7nKQf1K^0$6REZ3A2{2RgO^M` z&mX1%p4FO^KfK@ieS!n?m!Z+$9|)QG)KK%245nu&l7Cqv34*iYQVjw-6Qx3j?-^n{%z}`5BR$7a<3XzcUo}Jt`|3 z$6p;WUezwF6NP;Xn%sOzgr34gC4%-YKtw_H6$bc;iEeFIk7NL3M&J ze)b}I2^tm*uxF=#ABHjWqj!-J&hRP>B@)`_K$vzaDiF$}cA83o^zrOX`*!nuKf$3- zfa0w%UJe1tk!9pCo`ReI>?Riq<>2eEBrIQN&7IjhzGV~fC(C$HE)y{sDU2u{s(%eH z=bdmJQy1)2+<^DtdAWnPOMMlLg1RzzE1*;;fkKK6b+2S$0pS4Nyimx`ZpYCAK?RfT zsMR0(2(JLQ{F?pvfVPaQZ_az8=_GKlzOr|fF&Q}kcR@d!|;W2ag)ZBr{dj3>X=fh&W8Rl-By;TyN&OdSt! 
z!Te7z`Y#YcyPvNzAR+=1Q%X?Pg9@wV%UDsnp4Ga|ZWitBitJ@$r#nY*<~w-hU+7w` zHrJN_T}5G(k$MzqGe&4oSihNg;6Lh)*U1Rhk@za_ZW=>j+Ji)y+Kq>GlQ^B3zUr$>>R zHEaDHUm+?`FmxTX(9`jB=w>L#_eYPEND?mv&Cv8V(Np$;0_oedc>RBAWw!-@q%aqG z!8j%ig@R4SO2PuiF(}*f%9Y>|0|a1(5IOk$t7vs8wt+{%%{4+{?B#h^u;rmxKUyqN zVv8300(=Z?otirZTtt17$Sxy6m@&AJ>q5}+8$UqUHV>}KDTC^|R)vI7_6--cmi&?v z#W)VsmBe7vng>y)9^6D=k7Mmv6pPBIycoxcHDCOIqc9UA?p9^>O8{2j&|>+xrh(Qe zig0%0M-%=_^!gI=Gf=vUYyBBWilT;;c&X4ubNhat_C8i9@8FG_>gI}$ELOn4J6w76 zWD=r5<2PU~8;bBs+I;T}2i`2S+r>yy2x-a`H#f@59TY;Q3o?1Ke&1z%k9Qg&u!R*8 z77zFu?K8Z;%5(Ya8DDJ`QcQWa1!Qhn;(do*0ZeEpl%(`ZF>pq7-Cu!b+(M&Dfds!p zUe@2MF5#f0GC~$ux9AhaVH|ja_t+W6Oj}qni?3B}yEcdR`Yfu^@Tejdf&t3lqQU|f z4aQ!8;O~x&;h?%z=}YiNcrzPKQY;*a8MDrX% z*-Mu9miYeHFN`3viWe3*ojEb>6Up=hK%pY~2g`-eMOKicfe)qGb$>IUmwy=z4ltx8 zMwd(|aRQ6Y@|)&P5b(!ug&3^yR;xq$Laaz^NNh|@T+Cpn+zeVw%BEsRnMOz4ihuXj zznQ7c14<@^3F}h(k4bcHt7Qg}1zYCyN%~<574PpdngSvsB2pA(+JaCp{iQjJbL_hK zWDtoS09eoz<)?*qzyJUTaUuC+1-N3j{)em7d{X0yRq09ot3M|;FI?em@V5#*;8JlB z1~!#@5RDH1Y}yaOwr}*+YV8V&;=8*W_2)9tPpm_Ae!3*0&2@sRtIEO?!sRj5=DROJ zd3gvWC6->7O8*0#L=Fv38ZlBx%W+9az1SdtgJM0IXoC7eDADNsQ*TO+s}b4QFgs0r zzLE+BK^24$cG*0F(n{L+#4k^h+WfIJUY4Am(gLJ(xLjewXE`eWC%-}^!skoOKmGa% zOiC15z;FBCLN<3f2m2A-Yqf6LfKje=669uRb2700VT`BKcnPa|6%BYIx^ ztHT96EF2*Ogb#>e6emdccI;!1#A;5htNDNH7UU$s&|^%ukXSmvuH1|0y*Q|4m0ouT^8N!=@!IvQLSS^V<3p)KK6mvk$apt+`D+FD0h6-od?j7rSs0kkAvAQGy3?7Np!h}AOA$_ z-th5>K*q@)nZ%fQh;BX>xgA0c3yf_`r0%g6UF=B+AG+* za{@DP|6*Bq9d>HrGnr35S7;KB6oexbhP+^a5;WHz<$m@qPq)VyrP%Vjm#n_@OU4^u z{m*uOi8_AHZ%&?9RL5f;@nms7#0jx;*T%JiwGJj3-FQmZ?n4omQ?N- z4hn*xWX>e=iV32r)EL@<@j_X6=<6G{kM_vNqN13=0`O;-3r7DaKnx=kLt?r-gor^~ z__JB;`(0s3zT10k|MV!gaa)B4!X9}3%3c0AQ}2GFLW98}Hm|WG9h9s=s)X}HvU_@C zr2VE2|J9E#FRa94-{GM|?7aQO<;=KT)Xd(N&_-5rvWI8ZZ(w#T(hT1fiqD=7BA_@U zW^BvnGByoCfO4NgQ6KS3tQrA4A-C^y=PsovvLM)V4va#1z#P|}xzW5lc4Gzw*p4UM za&(CdX8+lp47|JR{=S&hdRjxtVDpk-(WOP=5Q4Kk7A9x3nR2`O z`0XokiL->Bfl3?4$Aa*{(No{y12Ne6>}H2U^MntpB`2#@GP%Fn1?&?9w(aQe_Y^i? 
z_Ii-|Np0nCqo+L~EQ+(kXg*pWq~6`%()N3w-{Ez7b1^qcA2d8rkx#b zul@1mw**|$vz?SAmbjI$!Kpv-C4bu2xP{T^^gZGg1JE=Nl~tuJ1_%tN8YCqsKbj&D z+&_qAzj>Jc>An@ZWnIsdZ+Gl&?=Cw+%9hvP(}*HS{S^phXnzmQl|>7@wfFdl(^it` z?9NNFdOF-kIU|zX*W>#(doq&0-F_l2jUHV7)h{=Rnb>`h=?@XIdhL4&p)ul1q?n%= z$99~>o~-dkB$`r9OOP)Z)UTs2}JZVQ;c%R5f^w^%v<~EWV|w z;=&ElN1`v@zt4+7KupRo@MW1qGw7 zY-_k|N7Y>Gg~w(?1Uxqb5RoBUc|DJT#OB~~K$4~<`*f)wybD;w5o4yi-d~Thsltqm zlE#3=+$n2W37FO`v0HF$@eh2fY-Vsmj&R})j5iz`TlO!n&-5@NTkpHYeGBGB)8)K5 zaimhk5tjQ)(289ozeiU+`{abQe?r5F`h2zHL6Bk2iVJW_c)#DMzPNMuUM)2Sp*;(E zSmYtLFgYWn9wsDEI{f|K!d0QXw=$ql(JA8ugWnI+HTT^LpXR47sX@Ww2wnGEdaM8d z3mqZ)Xa%_XBt%-OC4Zsn^G{xvS)viMkbK$ykyd_3g~5@qd3Bl|6d+Y=gu+7mzI_9e zDq&_+x66eO5g9Y-O1^t-Vi0k|Ew|o?jE&m`Sp(AcnyOaAImtBWIr=`eMz0 z8Dd|{S>e_Q0gE394BP1*bIQQuO0Tf+WU@(t@qB*JfT*FwSo0%wPGLjf=sv%p#V!uA zRbFl|e0~pIP83t+I$B@)swUTfg5np0uETM06H}uWZ1gq=;H;f44z3M_OtZO`)X5=) zuxUJEHgLlIeiWiImpHwA}m!RDuL>guIk_pjOm~2p(Ued6Jh@; zN2i%*{JcQ?R@|*9vIYud6%!H^xWreykuqkaAg{u!U9T-k0kP4Kr78$Kcy)In#B?Y5 zUn>v%(WbmTfi;2k>jv?C1C*5+Dd8UUXvca}F(jUEmH4z6IfR2k5OKH)EY)b#_Y1Xk zj;1t)1tN!@NZ||m+Qt=CNB;BH@^G_Y}GQD>tAdkjxEPT2gyv9OYwbI zOM#sm$(~+JS-@V|wyk&%CvUf{7d|B2tVrJ|7I-dh}&H7{Ii& zk_=tFfQa?o$6>yQmZ7=N{23e;8c*^oQYq`i3P!4R4V3)tMY%q{SKkrw)l``(_>_a>ujm>T%`}A$>V+%(3Slr}=)r z^mU0}S$gT@QC*1OfLNV?a$-0Q6clTaA;Da;9Iw}2#1eE5*+L;uOh0}6-jy;00V#Mn z9t)!Fr*zT`(D9q(s$G3~Zp#?TyLXv9%=n3s2E`i`C*z$Ctk9t0m0WbqJc`36oDi3x+ z)zRkZUKtt`7hd|E~aIprWqFz-WG5huY2baEzZZB1ITc(aE9B^q55ETvBIolb_JJclC+rJGy zQiM+d2&6P#{+4y7}+p{S8=L(v9a?r zPPCXrAACMoh}@si<<*iQEk0m7$~KwRfE^(}>pFdNu*&W@ zHWfjuR@g!)IwIM@h+`#h>q5{xACV~>WM4*)NAr8&&Fnt|bllitQtB}}in&RkY`gcp zF++#mCZ?QCCgzl2{s4_n*L>fmI63Jm8?xLX^_*Ce?;v1huwdyjjua?{D+_ShcUr<> z@q*C)RXcI)`*!A!{k|+9m|0B6ZeKCI{en7R{wUu;6?B5~25U0yP$s>Oh?7<{zt)IC zgn=9}C;#1}&Olke-+DWi-8!3b0t2}IJ0tJC(x0TB!cdqY1UbLSnZxj_ucVqPP-q7_Lp4H>U>KP-*@oLlz#-$Pfv{LVBnN#S?(a+ znJpgpbMpqQL|UY@MhQdF!hh1-nxW2@(H*^kI{X z9&kW+fvR;W&jT%yRv`Ny@Ob0O&eFf-ZS_$j-1DJ=pzlzx_~&i6<4zD0RW{S7PTp_( zKieFJ-KTWU&9J=wH?;Wee~J@-K*!^C6ef)rKB7?myJSb<_T_i7l86ilbYQcx2j2yF zl@Tjq{KTOp(s$1YM(sCoErzZ1KOdJDD-3V)u$g`#GTf=&;GqHovlyTe0}NL@s~a{X z3m{^+4-wP>nn^$J&Y0FWi)fK2e=D{KQ-u@Z*qEUhWK-WNR;U!US%HyJZDNJMPBhD3 z^;9}>4pO}QaAWMBZ;G=u@sWpVuS35<8vVtT9j%_tpK5`)Li`=5YBS^|+ieS!kx65% z3tB(1bxjJHcP7X82>3XzY&JDhNBteCBZnKyJ`RR|n{E}n*;7b{Ga!f*(G-F}Ea22& z+b-kMr5Sw5J&Yw{WY~IQ416QSsk2p}o8!{68Cv3kaotgE2306B02j zI<~YF2ig^s^mH7wRxDIS*|fX|K+-WBY7u_b+DDNHvu2DRS`OkyC5S>i*P8TX1lB?` zW_mX|tqUm$Bhi+fk77Jwa=qiD!U6~ifM__u7B~l;$D!a~e#1F@RgGw;VEsZt22*#k zOdvql+_0tD-v;y5ICX@O(;viwEoH5YG=;b1ID))LRu3+Te0e~=Hzl$~HU#6V9U=^SY$R{ulSyzPy2^MPzfzDQcjxMaCsDC9IIG%z+09pK9{jeZjjmt}?aK$uyAnLFduyKC+)4kT;luq1$#1sm;v zhRxOe0ijH0&?a6{U(%&feWg4#hd&Ega$!B?djSdol3{ftLP88EoG3&I5k{any)lx* zSU&Y9DqS3eJ=#l+vD2jz8y-|Y$1p3S1Ckva&bmY?(!+UB@^4G%A29{;(6rtC9a(ie z=+LQ9SQhcEZ-*73_Fjo%L`8%!PPm@WlKF6KI2SN+f9qdx(Fw}lpCO(f!dSBF9h4{( z#EwHZzkAQHNcU;21W*04Nr61ivI{R;kJ*8Fn3;w%q*FO>g-T> zsB8UJetuP;;V<;m+`E67TgLE6_U-Y0{TYANe3ZKH*`oPB{onSizWOO!h~H+|e-eBB zjdn-B%=V=JO;V9!dA&)?UYg=e^g8v*TlB6|c4q%VSF7ghT(#r7NppXuyR^amL=#8W zn)k^13`ZzKsd<8Q7vv{(ue0e_`l&g+^FNc_a?twkd}l3(^ics0hh6`miip$c&&l1l z%qoM#f2N^c&VuFtNlC+n*;n-Iie0S*C1m7QvvL!#*AONBkM=2h|7W0iIhs2IK-U!d z*WS6U@jTiV`AI-of6-LE?cpt7`l)-d@UOrC01RFs{BQ+0*)?9X%1J-a^$;skYNq$N zsl1b~FJr39{VLC?B4)^@wp1h_PWgK{KrJx?A!{tD4`$B@7>~M84xTCQ?{bjc28w02kh(R?N1tF_wd$pr< z#}Iv4o&m?+fe*tFcm?|EWi4x8BjiEl>2}3q1*hmNL?P>@WS;=v^D@_@E^Ci6_JUsH z>0K?B1~`aynpZw_#gfZGzreo{qhRf`jy32-13>6D4Ff>fKSC%P2hsBNhkl0;G2U9B z(kw_&p+m+6#t)+#YHT&qU)$L&U0eQPd``A_s6_*VzS~(?HXHuSEOv`9+jceg_LW%h8W?_>e{X+JOR+mVDF~J+( 
zecom=5G8Kzi}3J8zMneT_?y47N>x-cek{shhPYlJuuEwK<8CF4GEwXV6t;igqi&6Z z@pcdP4+DeCPve3f;V%GzKz_es&qtR`fgnUONjexPJEYF$HmOh<2w-+hhed~9{9w!n zp!HeQ%60^a<10pnj4Vmzfuqr&sM`q=X5Hc&0Y;t5Rn*AJ%)$;bB7Hj*l>uXp5;6u& z#c@IE5fp|TIgf?+Li?k&!$aQ+3l>}ZejolPzWtg>XO&zKf`35FxGHutZ-`f)^0Nj# zD}n5=roHXoq{Qs{?&Dz#{`%FI@oI8gusHWEkm`p*Tif8cWiRvP>_Ee#iGmqHFy>TSsXIC1N1#8_ z>G=u2Ig#)F2BA1q$=%;F z98fy8qp=|8nwCL>u_F==X`nuuH|Pm7u?vQdjRV$nBWFh;q<@e@I?<7dxZr&00x@7a z#Irg620g*&c);7YB;VgdLIWa1aM7>MK&!{0FbGpUG1u+#+GS-DQ-Smc1Os}o4S3S; ze+v}xvL4k~h@fY)DFP~e^Z88p3__-Qd>y`Meni@26u||iopb%&{H?i&qK<`<$&47R zZ2(7&rz&U~yZyAO503S{(BYpz^bG?fI`4JLUdd~BDTkrME4lo~uZlvGf$}_f5CF)j zRcc)=KO?dejJq*=Lhx@ha5;Ek>nk_>&WFnHkie?2xG?4t4Lhd9g4x2U(fr|%Gv@fGAx{$aJK8w?C&&p&EUo70geC$6XJLo8h-`WvWHGIZ)%q{ z75YxU?aW=65MnL?V>frT1Ex<59GiMk%+&=`ZecrC?{FNR0^-{+2b0!ePAN=8?Eb~uVBE;rX4sIwk`ors<&=?2%X$dP~#b3 z08R8ihvND+wwi1j=-jw#ms-~BNio2$y;&KJcY7`LUGYyw6%;Z)Mc9ZISPxcPgaqCm zX#hRD$Lrr!rV>K(|IrD8K~PrOvRpl@DB}0EPv{RW+E(6OghpxhK29UBoLKq>KSR6A zd){UMZHwa8DNwg`@7y9amPr0DplBX}mYvrX=y1!tBcaOs5XeA*$f5Y;nX0H)hN6Wf zUUoaijku`(ge%f zh5`wmi(RN1Hqd~!S;gJVY<_hR7C3qxhKYX7NkRiZd^?5*z#TT&5l~Sij20yf1_PO5a@>A)CLh;;S2qN*B90R4AYn~|S9!f?umlhMJAsxmvwN`u^Sk)zS0_7_l@ zQKQQ<+8^T1dp+7~*>o={7IzAHwFI+7%f-)G0< z#coJV0y#Ig^4J6In*k)fKpv4BF4*>ffkc^@Fa_Bp1NUkuY~Zj!gEQ>W0W9RoI)hEhF)VEO{f#t_6-Anqa^wxQcsy0MU;O`RT=fs})cv^0-qS8OUYF$f$l5E54t z4-4QpF0?ZAc=N?R&!Y+&8#ecq+&dNzmZNN7$G8UuP0{H2Ru;Cu`qld2e>`Fzr?Iw| z72W4!D|o4sNT!lJc;aM%$3GXsR~r~KHNO9Ps|l^xA1yrq{v6lzA|YV02hBixCFv8; zJh6yENn0VzN&9S5M;24Hv)1E8oa1qzN4KzEXV zA8``y-LdM(=ohatpe9dG;#lty>%aT`YYahW^moG*V0O4Fjj(shD`4|>GMyPvH-c$| z!f#c1f3Qfy#1=Q#UTDKi7|?*@l@KWuZ&PE{uk)N^`03}0(1jaT_irn?2Q!>fJAr&W z5I8ve4^sW`t67)8m^kx~ z1n{KPs`KyMZ{KP`s3B)(yPSgWO`oQ%K&mVnhcSx)Pz^d-;Zyy&djdEefJ_5Hz*zwv z25>!qs|8RyRee72J3GK*4Ot}P>`Q@mlB&W|5Vo2sTs`we7N2Ae{PJg0(NYQotIVN6 z{H9+tMasqzvp(E;4(4cXW$MIMd4?OeWivYtoO{dyl!PM~TN77nvk0u&{!Q;~!-&vgI#5v+TW<|A@>%_uhDnq#v+qrt?ctL5|OtCI6b3wfPRmpZ_mQD_NFe zNpvzfkA~R7jr9f_6#X0p>LoyH?)}HK+GPfe%C4*KBQ|S&;PT~=J7NaX3fAnry#^qP z8H_=~iojs2>6sZ%G=kora`N~BOaasmiVnd5JB$zrN8d}j<-IE|W{?vVY~Pnn&mx-` zf4n!*0z|$Z?LVZ0MN}**Gi071^vBPbEj=gZ4F#=>(v`h?8d5Zd9f6mhS> zhew#CK~XT+P-0Hr1zz_=UD*HJVvL5jGDnhPrMum7w%Bc=7>6j*w<$^rU}}=GMZ6Z- z=!Ig7W_@k3f`U`pDlUr(8+hoZzX;h@N#)x9SrbG2U$A;Uy~{xVm#}{q@4k?XRs^5| ziUMyaK0#&Z9J*ZKR}OFi`v1)~8f04C;&_ zBL(MRnAA3f6tRM?J>$LpeA(WX$32WmVvFptk=+2dF(`N7eTwbWPB2N0D~2rk1xFRp z(&M%MtLxRpcfb}UL_J^rU=50H;Ss4FAmQM^22*sfI07P-#cs?P9XJ`p#0Y>kv~!OB z&;VA*0f~3<>=A_mBn2^C4*KriUaFUBga-|~BR*7=1o#Gu34u5uLMtq30B7o8dyPV` zxB#4MPvfU@7B}~lWzT94%yDYBb^1%yvrc1Rm@U(I-^LDl`6-;Rh>UqI#P;$ha2 z5q==Mz~Y-gbvBRoy>%u+8(us@22u|sKF(?zmP-$j3(SZ0k^;nTC(WDO0XXJF00ag3 z2?`?%^zjSRM9t3a@BZRg`n-73>r}Y>snC}~S=E3<01|jA35-I}GuT{-Nj1S?fK{3B zh5bt2o+b{JC8aO~QduHKh{gn`Y}D;u9i*LU$@ToOYF@PY zqv!yO1b04>VgUU1usSw*EWR#V^EFOU!@nHrFpL_VLoFMBlu@!LOhR|e?EfwL-q8~X zgj3V@^bfcqEq=@6+18LMs^D$QW}+hLdLWEQ;l3kow8apEI((0?)7AwK!!}Yzl`e^2 zZ`L*wz4rWMaTv5R1_SJt{fQ<*&A*vmHb}>uxBoNfADdwN0yJY6##+EyF^_5ynS9H% z2XEBbLG>7dC=v=@-CViGkgd`10r(E^9|r|)OB4B&`H2nJkzTDz|3`-dB`W?UC4nOrwhhkaR&p&#lX{%NFDCoLWgB{Sg(9cpa|4w1imEV z_sXJB!PH;1v)&G6_ND9np?=n4MkKi6Q)kio1j}miWLhw!F>qrh0joIg8^g{_jp`jM z;qtE3n;>%DX0ECbE*|02pmeC&OZ=q<@)_-U#uL8h5FFhH!4Hm)m%Z97U^X6C{0>8E zOmFj^v6;9XPM(`#QK`Jh|5fJ1EM6+a#X(c$RV51nTmYZW9&w|Fz@+Ywb>joRcL7KG zUcb*Ydnxcu?w~8mvF+@lkRTEqPX^wjy$>JsZR3LnXdr{yQ^xln?aOWW!jFU*Qec6^ zuy&ev%UnxUZ|}u1-92z+Q}NR28 zaY8}@CsGyq)C%%UdHjf9x-FIBS(b&<5-y6}*udg)VgHU1-k-7kiR|{swtZZr8(Dn~ z^np+6tPWk7g=0qs-pKU%iP}%9n3}jekB5;l5~Brv4E08zK;Ywf>S%+vJQ&LEE>tdd zrHv%fD*!I-GO}YDnoPjZB}BuN379!Tc}B4nC%Av0H~5x0d`w##A{d3(Fm-ZBAUq$I 
zL^u-jZCA+mE#u)Pk$Cgh0EBAP_lDACATKY&)ui6kn&0Ee!uhk&d|`#uIN|r>BYQ}J z*ZQBr5Q%jCswM}{#h!ua8a~8uhL0%(bSQiWuzoKwDm?=BUio{+S|SvMWs_AB2BQg< zscRgGD1s9T5Hzbo8pNYS1b0LKOweoIUv|s#n1L!`5`i%OD{cnM7%J(V{`@M1_!N8j z=qjv0hJM%K5}Hr7E2-#ob%`m|4h?2HdFs>u2oWE>t*rxELKrd2gdA7PcyU`mapu3M zqF6aEa^JN};1WX#Wxf%YHc{moDdVFYCL6L;}P>;wV?I%P~pWK$#gLX z6|0Tap_?3VTjtETmlfz89`QRYwrk}ps4EJR9)frUz<|mfoy0+dr05}1N55#46>A3I z9C?K>lc!N z4?;V?VCY+ImXcs3?q%UAnk5dxJOMRoBlO_y%f9xmvJUGi*GYE7jvmEJ z&{_{I=e);B5ylPLJ;EACwd9k9{P;e-U;;~i$*^qpd zska$>^Mm0eeqmbtTy=?Wit{A$i^2DkhznS9g$;gAVeoC!&=p|PEchhtt9@(Y`TxAD z+A$Fijx&n!O)&w^6jL?W@sDqPhxR{=D%(#t@jfTe=bTthB_(Blk}A{o6*_!RzQOjw zJ3nCi2lUm!Sc7}eDhFW^!A<%dxC2Mb3!H9x>`@~FooB)Qv^=&607wObVz>GolrR)R zG143A7x-%yUuBU;i4{Ds-@P0m2FYPM1?Kn4Kuky8f3ELijnmkr$_aaq68;$Iv(MX` z>xl#{IXQduzATIGL;faP?*ITTDkZoO0MGybf^f*703g%r`2UAkxFmbAvdiuGI==u8 z8>2XQlm`dtB>zB$02@%jPpJl1A*c9$xXwWV;3*Dg#p|Bl;&jNt0Dw5DYyq*f#UnaT z|8L{+QU_W||NnXD|Ns67a0PlaYCZq>f0OwC;rI$ogu~;%{4#_1%GqF%wCnZ`viU#$ zgW(Fm1P#Xa(2xGH_yiiPKR!X?lmGm?@9+Olgy1FjN0KuCzu*n;VGwWs{}>_R9>fYh zCR9vN8}GjR|NrR?7pV1uXuFG#a7H(B@AUpASO zkNhQo1>#F6?K>u}-taywV7dOF1U%rhGTZ302qxekHzjQ ziyi=0XAp|4e7vA3$e7ffo-on z+*=EFLoff=|KTsOUHM@3=3U%LBM>&;tt-ezFs*TkAXt>IOa4b@Te$@IiEa zao8gAXMo0cIRD(8;Gln$o&t=qE!tGaH6Z$%KYKdz^uj3#UJd^_k%6DzNhPG%ZnT zS+bq;lsB2X>KI$H1nc)Tj8jZ!YYWeGp2ohIo^jF5h4YbE&l8}TShsCQuyzUS%m4fn z&b;4u;nUy#@BAm(P<6b1zwq#-U=MwXG~W8fXYK#^Lki#t!;+}1w(oK%zd!tmo92eX z5C%HG$N&HCgaXhk$$I0L{2$DMp^ta0=9vdmySm*l!r(H>Gnf0a`4VBg(BND1%oC6d zVY8*>p_)o=2H-8uZQ0u4H-|+q~fQ8N> z@@x0}JNN{OdnDg6F<<}w2z(f?ejd9*6v$z)>!U9Y(*S3C!5AN|@4XS2i4o91AeIEm zcT;>%Jl)twB$xx*Oo32tU*9(f!P@e6U3?5!x_B`eItZCb?I{mq%PDiLL<5myTL+Wv z>j$>cU`?%FVOYHD-bk|Sn|osA^f5es8<@Gj$&Z>VrW61CpNR6`8o(1;)9bhxkH*qM zMQ^{L|KX7E5*?X8GmgLZpaXQ04wkr{G7@;UnZ@7#|AOsMFc<&?1D0ha{{Nz4DPb5u zAo9bi%+GQGph{3eY8*F~2lzD@31+A;SC)Yy!%Y8g@Hx>iPz@l|EKV~5%c2Wx6qBKW)o|u2M4UAqDT&T8n^jC z9^q6xB0w@77u*LHLO&z%xW2yV3F-DR*-(E!f+Mds$RGma5YYYq)`ZErYh1aI20dt~ z(*ngUP(YXwleXW`2|l*?*_e2!6oOGHeL1|5%bWt2EHjG?;pgM{E!s8!1ZR)5VRL`3 zyutzh;phXV%x$+^i0A+L5A^{+6tn;Y0KocyJ=zt{1@ixk&;U390s?>nZgGoBSVAdQ zJHb=jZ^>1;ZEvqyk9W2KfN=N`#L*AR0B*ppUeG{+rJ?f3_hL$D@as(hzW-jh1_AyX zg}?*Lb5LWzl}Rk>ZVfNhdjE)U*q9R0H;o1U+Q@t2bZ)uG&c!-wRcKDcO^>lQfXujO ztP`HT6enl^9R=?o@zHG|griU9?>h0`0$A9KM5U+^R(=2f59~0ma?4r?olj;as~Q|d zq?NQHbsZiI`f8I8fD_jg4C4#)89SU9nl%H^BuzGHNk*x*fiZ5jNaENo*V12JQmbvW z7sw2Ns89?fkE>W^-@Y3B0sg}6&~^y_rwrsSu4@Or-i(yNy7g3Jeo_NGWXy*+g|PN! 
zihq;jPJ#zb9_XPE4i1otyFaW05>yM6*m=K?OHx~OHo@xx9$QryKz8P+3_p`w@JC8} z0K+f+?UsR!cE@{PhxiBXW=7e*ct0X63ua;O^`GvAZ=wa149&iu|Gq*1$v~#03*}o| z{s7Fuc`YdQd=7eMud-y%|DaXuI%MAfQz0JtlK;Qw`&pt&*r4uO!aNw?*@MT8>P&0qXKVnP!`9W}Ush+<@$@?n_+uzFZM8!YV`Ip_#+ zT0xJ<9Bgt>?~#PmrsNf| zw5`G36`d~ug#6SkzBc-KtvWGK7U?)^2%;a-d*hvF*zhEUW5ksP9Vm)pEE-FBFz*E{ zgtwDc1ZMs8Do!^%doWVUmR3`$_M%6s8>Vba&DciuU}r!wcCT}ZYkaxcHcCKdgY<_2 zq7_`8i{=dU?k)ZF%j;X;kT9=n%UT0zr|8P#i+^uDxFXr5p5u6R5zqpH9OKFrm`$ytV8aihSB(k& zFVli>%+IAdum>uj-RR@hJ$L`{FT{6%Hb9^S03-&D^My#IvWLtn07d{hAl$*jo=FC6 zKqBxRvKo8u5SnPSnk)Z?0i)}2opcm+{@|&7y4BoM2kJ~Gi=kL-o7Mj9Mkq5=(aU@= z0RQk~d6ZjM9%?XX`73=54u{dx20Xbaia!c$%j*La+xy~&IFRz>9j%;Un{HKIR%M})r}yryyP7T(!xwWe~6J$+D3Dp zIGQpfRjPZ;0?1%zO9E=!E$OzH9`yR_Fcy@CXIsCeQNJQDh4`NRVbh6E#2bbSU|D)O4Jhk7g%HGuIxktWi^u<_@d?kfh0SH$Pn zWxF;`B#uf6b{cZYD$duzeuXMs4e)mhIHk|4Y`hkY^?R$Lkh;5-+UV{ zSi9#%!y;WmRNlS*W0ER@0@?4VMx`|n2dKzim+IO8XK!?_4fCA*QoWe262ta#Q~p}I zgQS6*VkCr)*X?8~hp4UJO&I-VA8Cm^_q|U#6RgmPH~be6{@bNGeF`OI%OR9A$?t&tgoqm&#yAruoYeQF~J1^ZKr#R6H}K0FFq+by7FF%em@M?8FeJ zPtmLwI2q8ZTf|wf9RCJ+PZ3z(<0ZL^H~;>g!EfdRT;J=6&ttws5dZ(u3=qJXkyZWy zjO!l^KAb633JRFUKFF5oL#gw8rx z3~Ym8JX1S@Nn+!gMiBx6_c2=`tU%*{>s1+;5z{c z&pad(CDI3HyV84YsfXXOJzb;w|Nn-gDwh0&bx;5NKd3|<&zA36>bkDf)BpeB(V+1e zrJ?S>(9}wMzt`XY`2)cNOX<7+|MVWbi}Pgh^M}tt`a|IXQay>II?|@pA|!Y>*0SCM zHL8#S|Hm#hj0Xb)%EtWCas5o)P6>a-V!d?zxfA>Ex$HtI00U+sj(HC#;s3l+>~U&T zw!*Ok8-E|+7R)h0M&36v2m@@%vmAWog>yJhjSbU7JoCVX@brenwW;}NUjG7e^}7x9 zW#8)GN7K;Ni6bv4qa#KJzy#%<%d(e-eQn-jd9mTQD4_J~I+p`=L(VaiBp+@&#c!>A zi3^0l%Qp)`z4g+;7%Apb@_+wyrE+n{BnSsOuNs(!>_8h4zyZ|2TX7E3eo`cglvl>= zJq4}cL0fG<)pcKA@I;a{kV}pK_kp-fUGxLkj`C*r;k~Y~#wzmH|9`m9me>-?djd#< z{*VM7uZ<3*nm+F|AJkvn%94)8#3WC zv|t1&UHx>RUXKW<5m=$l6GN_LngoN%D7g8j{7d)#pa*~fxi0`P1jtYZxa6|101O@T;FY%R2anAq$(+MJFmisd*4-GQMBJw^M z%Y0+s_W$Y~U!n(BOD}imVTa>tWxO$H&Zu;;Ffck()%{CEeA^*lAqt0xjGXbfH zeM3094L0b}6X2Pek&@70BR{jse<#v|3XMf|^Fhuh17ZSL5LNp*41iL|9F_d@^C>+h7r}`N%OCR15dsV- zg0)HzLCJeY@kwvGhR_Ue zQv)AR+!-5bCDh`$HjeyeEG}7G0|5U?Ww0;0TgWv+=}p9x!=j-o| zSldu035gf_60gT4u4*upt$TI2-T=Z*4XZjsJZ@-Nxi8zXzwKp6dS0fs7U3(C9IKX&M# zjTQ<_`fkrd4*l0nRBDsW7LcXB1&s9ZQd~j^w^r;nW6pG5-&VAI{>;J=d76!m>bAow zVjWM2eZRGc$7LHjF2%k=L7bK^Nf(VKU1;VHsc(v)&bw!!p^)?h0pYrZF0sBqt=XWR zi|@Q*8?7G!l$qJ!pcKphulM*bT(izdj4$A<)+2p`LEkE)i^jeZ#38+*#8cb<`*G#s zR-_+>4)cO(?+YEh`*7Jy`hQfe;m~33Hq#E_>1raKz4u#08E1q!hraguMF@3fB=BEfD3pMsCB23-z)>B%u~M^vgGNu zQ^kM(=qeECGT{SlC8&4IBLR$cC;$Bgao;$S=0d($9i}(ZBu7~P{lggnGhQU&F?SmL z>QPNY`_?f`1VA;2;zkol0a^zI^xOW@J7e_tJb}p(&(amz#=+J|AF&6F#Z36be}f;P zqnNoyqVI`msd8L?ZA-f{;MWh<{F%=0or8;{8o#(u6_PZ4fZx*^xuP~$|76{H9p_nz zgC;7EeENXKM4HfWeNZZ&p7t6xQ^vs%`_{GHTU9JKX;#v0ET+&midV{|A1f&Gr$TxnF}a8pzsdljbBRt@CCsP z*QoO$Uo8h#0HaJC>a&ASFT<%z9u$ zlrt-^C<5ISs8%k|&~_~eneX~E{rxOH6FdxoaDp0VHHB!i zha-uVNj0y)C)c1fYP7&HYl0b8`v@X%f)%OPApZfWOEM=-|Lg1DhZUQmXLxmmKd`^A z!Q27&M!Vi2>qv9W-&`7S!ri~xm2UWh*%b{fQ17nSpDRh}tAjSbq(?kCBHPctsXz1x z+_Hbz>;s;ZMC>gZbIzE!dHvxz&on@%t8TwuP3iR5T>%SPfUPJ$F)Xd^X3`S{;|C8% zreHsA3`+tPj3h_Jt~MDo-H{|hmIy2j6EF3Uh2$P2l3|Ph2oF^BAN&K8)daqeV-oEF z7j1Zb8JTv6)z2#M00bZqwq~$7KZB3(abWOSzym+%z)k)B?gUrC7~GYKtq#CL`S?dV zv~z-AmKKwF(ScZ2P17cYgz@ywT6T z3!Hwk^w=h@{hvH*1VcccDoT$g-#cNJh3D0$V9bU)2#l|PuV-~kJGyRSE8+0u6WIuE zD2ITY7oZGDX+d(ElJy9I!(F7`X8Gnz+C^)la?vsG!o{~p6pWW*K%yx5&Y06N9rMEA?5C*SOmO;V z7lK5d_icM@17iI<1g+HO3;yo%EKQsZ{G$bj=9b@MK7YLiyxNhn3W_;p|8Q$)pPXKZ%i z7~Fepcfs0h-0~=@3I^Zy`$ikX*~V6my)%IC5{%tDwdss?uj*lX?Qk#(U;qNw?^h9= zt&8t7L19!}87zrR34Q1h0f67UIoq(ZBfMdnz6=Z;L(KsCY~T>^H)_G4FlA&Nqbch2 zx>#idqu1te5Jr?0O?@k==NLktqhMJ9b6cEIuMi}SL!MDGJYHXE(gk=YQDYY z76Z?m=N!YC->cMr#aLy6=zxeVqY*=rW<%X*3mdTy-`fD$LJK@MQLt7f7I>(z-L?#d 
z-2ymL2B&XjGa!bC=iwb3d4hV1;+rUs$>Jkg&nW_q$RP=Mk!g?my+4tt*%ij;R{Hvu z;L2nR@+r7cl1h)h?e8?RAOE?|fCAH{5L;$|FoU9f@1u)Wy$s8Vfg#TZ1s53PkF?q$ z?3HL_=vI{}Vl>3TN%@Oz2|zyKI}?U}D$0vH%bWueTr>tCZmyPI+dMPi-Um*4~i zRJ(S3g`f}s3xbT?Y>Qbp07%}aWu*maL^q07@Ta5@toZ#pd^zF+ z(KH-c_^U;UEc;hM!>m4Fl-)57MS~d8W5WhOvQt~>h;5J#=;(4V;W_sz?6TqE3_Kuw z)Q_-byDI)b${`cSYdKjs9$)Fya(3$ePXp0-*>|?bYDPXWpgll)v6PV^zTzu$%J1<) zmYIQxJLv{jZ}R{9$QhB3+~TKb;547I^8ra_@;leR{YWA*Xq82`cpNhN)@mF`KLF}Z zSPtJiP=2XnikNph|H+@-uKu_H%Xjm{d;&(?Wy${h2<`gfu^UluED!kY8m@o*&5c@E zRe*%Wn*^NQsCvoud%(JO|3U{MB^ijs9b!y_s!KMJyVsVzh5!0hIQaYa@L;rXGV??+bK9a&2Gy|G7?MEjZ@@!%fuzu08wc+wTQ7}a=}oujgefxo z^b8QpGt@jQRBG|aw|RjQ20CTx1#cl`a$iW-STY6OoEKh-$lKIJ0~WO1nRo4j39Jvd z;Tq3tRAoaC`3_n`^~2+#4W=)QCNVMl^{ZDX6zv*VMT7czJIr(CCOp!gUR1*M*MJ8I zLHE#6?Si=IIMTzD?6x5B$W8^^EG>lOMZ3pJu5a8w>wqN3atX=q2NJ#T8j3oUC#L%; zCqMib2U0YGYt2F5d={Sx5Bf1{D^qau(!aO=;`;3zk~QI`S-GC@a15SUG&ezO$lxDX z%+nnvh+-F^w-X6R4m!b@cSVzbZ0wqa5G<2u$DMz`7xol)39-OHkx2=#{tT&=pb2zx zIUy4=T--9q8!-&D!BBKw=pzpYv%zjb3}YL{Cy)n}rSJ93RtHwE&joPws7xdHd-Ng( zTp^LMaCq@V9<>q`@P*EPl-v46@%eS@^HHkOWA+FzC(u~bCTRLwzZ@sSo3h)jQVRBYh9Z`Q{6iOmbH+#;tG=&~_B;S4aZ@~Ss~La)>@5Ur zx|w551Ttzrl{I041lo@6#TdCNfB)z$w95}`mL4_Be-kp1c`E&f|3-xcsF!~2re&Gz zf`7;)u)p=VvgDDSKz|qY-}*K4_7R?)+vqW>W)I&%|M;QUK>bv}W0^22E~J?>+kUo$ zqF=u|roD2+7V&;$^#A|(0%8pd!RH~q>;_rmikU;`Fz@@n3EzL(zv&8X3crVJhmDg7 zw|Pfp@s@~wx1LS*hX|AX(`hJ>0n?p$6we*E(+2p5)2)=G+e^bjGQ#xQHQClIDSr;%ekofCEy$rhKq!-Rd^GVC%gPaoX(DL{q&(*T=KwE|{8Ee~`~=u7el(#(aO~RC1y~4~_0t0ro=sz)g4q3|xijcruasvc4HW zWet0D!-f_doU`Ck^*bjbCKP_H2x44-F`!=V&DU%U&KbU|oZblmUw`jUMdH6(WU(>RzdZj_O*%h` zpQfL`yWe5b3KMpQ@S_eJ=M#aF&-C3xm%KC9zRiOl!jy~sK>%_;i26yg<+&|or+%Kp zXXIbv74aU>wrSjz0a+}7Hp!{VJ`=&~0q2xRKR6Em2embW>ZO=Rk!v+y|M;xc-8-|X z)m&*69ZM+&wFJ-4?J}#!oZ3BVe|rqBL8=%@{Mp$}r>~O)Uw)CQ0eD)GN9RA}V&tG0 zwtOf&*@6zk{Bc@)9C_dOl@zKW{s}xC?}^xLU}Nbp_iMBENCD7EPGk3X`|+pfrJOSLu!MpG{~ha4Zcj!&5!SY z{{iy{2&&*%01SX=C=;%782|#nc(eaL2q%ZXJju~88}h>tp}d4&Ci61{9L)d>28%S90R&&KNPq~+nasS-1`r2O z5U39^#4rHzVQ{w%zzmilon7rHs*)F|1>V_9HT>f>OG6>%UN{aTL*?Pj0OQ7#$`sG= zG}v^TQ~%~3001At;77BJT6EzY_?)~eK=asdf2$GFm;VOI`~YwyAp%7mzOE$x zH|lH$nt)xeCe{w>t;=IDQooCN0}(M`5eYt=0pWPO-GCl4PyLV#0LCw<2&&P@76tRi z<|C6(42vJ>;EW!d;2QYgytlyT`FzMt_>6UrQll@^Pac#;3uxHu>T5zhMqUowu}a(r za!bS7fynkK!vFqJmh?ab6a+v7+a4Rg^9V&>9=O=UqYT=2KC@Pm%(%i2x=y`HjMaD= zI{Qw|;fhF+fP5K+>L<@S~@h1Y6HZ2b0cSo@gkPeOQ;*(wZinqF1ME z=~Ql7*zw%yqzt)IcFSm##}%3`Bx?Q7X0+5(MOx^CnlrH#&I+>I;~7&KUDt_wme*r= zB0LC}ktwPQ*TlVls`%EURSV#$Jf;m#b@bc=2u_U?SJ7c9Dsw0B$(gWHdT2gb-4|~H z=*%y8q*r^u=Ar|n5SV-Y=0l`|cxx{BA_*>@+kf>~;0QAcI&g)R1HV6U(gVl;0ICG3 z<;$xr*<}oE6$HBXzyJQ0mXJgCCQm|TL5Q#yVGvF=;(-xlV9_HRgRcQ$SqK2Wpy2@) ze)@R3ODF{H1K14Ef{9su_x*Q=zyPL+<4Je}_<;v- z0*NH&%2GKfE?G*ca1pHIgdosEgL@QV;`f$G>V0V+0t`U{P|m*RZ46~vj7N)$+;gjI zprBQV3~PFOBPFf805kvyJI5vc`2YZn;)}`Rcqi$V|txVK;^JGw)OTAXoA^yO)61&7rDp(4HTp=DF6UK6B{ys06kHZN6punQi zfXD*s-~b>A;2<0@pf(5`OGgDZ5$3_}C#kFkVbwUsM3H}JL!g{0x!?iO?rdysaWZ`Y zwR5m8NiZGbkWfOK>`2H!$w>#M*uVL~W?;fNgd1SkBrru<4?vi2;6{*mSte{!1OK-m z=**L3WX4yf3?7|SZA;fJe}l=8C3IKelP+4!sL&buA3A_dk&p&}$9C1k3r7q0siZ_= zws7`hGD+%K2t>ED2knsfM$t537+|zFa z*dAu$zP5mdh3`vIcb!s1BsQjJ)}Hjj*w1o2z_}#S447}Mg2HBLr2a{V`Sw9<5K7tw zB#?`dcf=j9F)LHNk7%1k}Ih7<pATjkQjii}?x zC4!JhKnRwKDMi>HVV4?!1VPqST30+RX*9U{*+c+E=`}JZmCRRG!&mhjI zhGckUk`!rs%{IrLWf#OyT-6A{I0uB)(Ij!bVYZIJHf0b`$XTOWB&3r~FP;(zktM)z z>udtzFBgY@NE+TpB7X;pF<4gCe3WntdwA{Kk33vnE?hl^P)J$_{vs8Q$OsoPXW)sV z$lLmbdSmVMk3}(44&}G$Kb#qO#Tj>*Ij=oqQ711o~Ep-tgobRXO&k ztfX{Pl`|ykBs~!jD2YWA)(+Zzw}1qkG0@wgZ5!$LN}AXD^&J8j0V_Yh`fuQg0HHu$ zzop>Qa`glBIc}IPZ-zHt)@~ff_F7+JJ4#v959Q|E@a?kNp*riY_>oDjS_lL@Asj@U 
z8Wk)iPz?q#4hRaUpJd@?+Rn`2Iu8Kj#>u_8EFCY;`+gt7X%Z6uz24votW2+h@#Msr zoJz!BAlw0lFOK9sz#}Bwf@Sl7q@%r%7B>E}L1esF{)_?nKio>=ANhhSBlq=rbh_3s zV7K`H=q=~}=N*iykN@ZqUq><<@!4qoBu=+Opej><&*=_-?Xja0rxlZjr;7|mN z(W1nu!vvb6p{$QTye|c*3hW2lKwAez@ekh$e0ZQH5RoY}%i9+B&c~Z{pQ2ERkZJ>z zO>P`E)s7PNb*Ch=DX65HC#JS%e~8oXc8!_Ty|PQkHVeeq(p`{3^r?JrZhw2+w38}K zH~3n2EUd>t)V&b_a}`zF=n8D^57{qsv%ma*(j{U)S5K$)>jV_@{}6eG<( zB^RzR9amtBD)L&cn2FT3zF&IB%HY61^79IlzyF5t_(Ct*7l^fM6#v`!000VJA>jlB zx#*Qk@Q?Z)r==AX&Eq1Q2pqhQ(%G*ER|V>q$*z@uO1(&ZWnTsHp*A!^c=S(~YYzz@ z#Y-5pV%F2ecEXi8o-?KxaWfy;G=D353V-caUsoID6QKA<1pF<3aKyB_Rm^YyA`B@E zd%zX;jT!bEz6BtYfC!BqmOZsArYs$HC=d`RP*?;(q4EHAZpFEibxkViIRkM?U0wL2 zk^@uV5x!2u0B^B%yg;d0{=O0`{+3#@omsBS6=tJ@MVfY`C%0*tu-BG|5JKTIv`#%C zi)iR#Fai;!QLpt`R|&|q#7sd+oYeCV|MU{0jWlqx5ve*zQH)3(i6Ctw!!6&QJVxgr z*n9m)591*hivYQ6ae@v4b6ruw*~-Qyr1cgmri>$sO1Oj49FuzV36vl~~W)NcVPt*Rh zA$~-F0Y)$m0j4q2zJfL7!r&IZn(kP+TGwn0uoEm_X3u8NNMB&uv-YtMoBZSS72x{I zP4gle84J@w1f&?oM8_T|w+1WB!xpT3Iu(-=^s|MAe8dCF?T3TP5@e}|6hO@PcHl+# zYgOb^FvibVvL}I##K+(T$Z)c@X zSPAyDQqw4vq;0du6~47FPFvq*R-IZ&l}{{TiNR(ig`V z+AnjBF_&t{U`ByR;8HMy!ioP(&=1-NP|$S}QkjfjVJqRJC8C51!5F55biYntGhBg` z3>AWmM^}v|Ax7YEVU|U72g8O%oB{i8Pn53vzt}*IjO#M)ect;SaRuAQD*1>oo1IUv ztbMdl+>TLNmHRB;d=SJsECKGTppE&J2mT^Di36yS2*68*-g^-2Db~mrS|BQz~nt~rFUwaU6 zWl!J18%!&w2TXSTe@a@gBMT}#$cl%NQxRb#L?ZU;H8p#>2nb`pr2y(ewKstAzD(=1 zAXbeYjT}Z&FOa&d*#AGE`tQ-5d$1ssNdCvb-R>v}c}QDawMLR4c(XK^Do4ZS1i_pt z;m39mk?~B~mP(NxMKG3O8iDdc;Qv#}>*aT)r|e4W@H>K-|G#deT;RXe4KmAo$|cJ;Gx1`#<5cY&$Y`|}#o+r1mhS)c{k>u0lkX~Y zL=Gy5LtKMu1GQw?n`O6_Bi6fm`us%R)W$l1@s9SU{5SziTwpeH#56$VNNJc442th$ z*NlW2N~YtF_}{o?rZO*K)ZxY=a4gc)_CGNjqU4;`zav4d~7jt9^&;5#uN=Qz2}%hc6X^no zM#ZamRM-lPE_}O&;JYzkLoi|W1(Hk@7YF)UB-a3$Kkk@!V<7~twVtm+%XMO-?Zpwm z?NaD#5wj6?+IS>FG#v-9e;*MoJO2?KWkLIWXhy0IjO1~Bo1*BauPb*Hgh`pLj2uUM zZw-g-vpXrEctZDN^>BssS~T*jo>Mw0ONo6yep<-US|M8=H$ZSg0;FWhk=8$AB=`au z=2DMJ-iI=)y0J}tY-aY%F8_=pXjCTc&;L6&{*ue689QFiVo{HMwWZS$TTz>z<#<1l zhgz>Bsr5(yEjO4*o|_GmT)u)PVYU@P{$Mpu5#q2U17E` z1dFFR#A3X zra^?P1gadcUR^K0E!7+>K}=O5gnbEpIcw>%QsvvU>yZf(_8jrpdz6?NNi%=xsS1@D zDW?Rg+Jds9z~t(B1_6!JZu+j`iV29^7`kTuK{gG8c4+e5 zyN+oj(4v7s_WT%mJu`E%&a_u%*Yq{N5iK7?9FAA@&Et)9F?1Nr%)UQwt^eBI1{0Kk48qXNS!11L%29qqW%XVzD3g`rT5ChCfGQx{Vsn$=;}i052*){2&M}w4mV}< zsQ5d~E!CM`95W&E;ku=^M&da!&0;pNVhR|{qYPks8q@O?am6&08pzTZf z{R=|3#dV1 zkzxm1%ZiV|t%4}^F&p|qM6c|r8580$1L0v`e286s?P?LU*NcnlzM7EUKkQZ*_y|G7 z&MGVt>;PiY+2^Yc%DC2qAv<9>2;ZN5JU!z^>2DAVM4=)AK%_`42|yYRWv4oO>Q{l| z06xNKs4q$*$IG|v8FKx^6O)~Lo0r`32NZ>30=4deHa|A`D!c#y z2wx%M2n9LmLNYG({)eljBGIo?ZQR&Qt^Bi z5~+Mo;J{l%w&7yL7q9WasNNmY!UrgfI?As|7M!pgH}~VW2p&pT{ieG94j8WoYFainu6T~_xI{{E?)-+FQd8Ky7gKr217%)J= zf`C?^8q;XvEgS|(61xH8a7{DY`PBjQX+-1U&<2$|DjMR-Tk0jHn3>MeU*C|#w{Bj> zd}T~tGf}O5R>8bXmUa&izJx`C77c@7&^rgc$3|L@$n4aFS-y=^=0OmI)A9Oe-^{`* zFM9S7?!fOAita10KqC``-sK_nbz1+#L-QA5?GI>dzMHH>b&^I~fcv>-<{oK3wGgNr znrL+nNH4;|-QnQklZXx%Eis};t<1n8?2hfGE~Fj;^vWIvSZ16*8W(OA{?owjlsGg_ zAA5f?OezvYF{RwW$>&ig0ODns8%Q;@2NNO^gTUuy-;p1$<-DlPMh5ZRT}7Hg^naj_ zp@9<3HCW&H@lQ0`R69G%*c_kft^~QXm@&qqk8F7LN$W9YMeCR`=Yf z!aM2FYALVan7Fg1g(2i(c8tG)#294+6C(oL4;F8dHa`>QB{co=y^)S6P=lCs z?F@w9AkTIdPegCp?QPGJUhT@8aP#(C5KvhXD@%g=yBA0xHvMaUwJ<`TPBi5czSNSH zL2K=KqRLuq@(^qNRcodyRxe_)Wofoz%?$vgp3MY6Fh=$&7``}})+S)!a3xF|fyJ0e zO~>EwE4~(u5EmB}IK=Kcs{c~2@aq?tgZsICYB)<0)gU@=fl~AFY+oz3-~~DXQVPNW z(4%R#G=kp&Yq^lXJLiM?^lrexGuyU;*<=$v{HZi&g0V*mrr8Jt0b}TsGKPrGS@dcD z55a9_N^oknDClQ6a1RKN1B22Wj{iR~4HM#qE-B4~8xw{kK^TS)wY**jUT*Dv#qwj7e8|M-MMO=N z^jW_6en%U}w+F081FLA*cG?mQx6AgGVk*4sVHOdjI@YRbN36t-MZGKEqXMU57Cvhi z%W4#1z@<}*d$p$VmDRpfu^}NS?4=he#zOf;6)Jbn-tk-7Uqso?XCPHfP72Cz!a&@gk7 
zsL`=~gJ84|*tpk~dg5*Wm{v7T%7BP8JTCOb7Ac7wET`-J>jWemv1$*oy5|qyf@Cq0 z%moqRX5SuhuU0SFyaE?>f3_2ZR0rlX+!K)4=fHUVO_zilJc?NS)XhcleAG2Ic#n)i z{Nx>jVAV=YJC<73rpjF%6)!`dG(rd~S&oT#*cjFrW@TsPr0_ zt}6!g&E6b)=8$3!*~Xu%F|PbHg^kA9D`kW4mf}fvU32%t>r5MRg3Sk9RZ|C7=y#qu z+hN-l^LG1sW60k)YuCJt6_r1)T0bq{hCUib@Op&3^mo|EKvma$D}@xtZpGzi#BFtc zZ9V`~&tR30z5d!V?!=bluzh@l&^um4W<0Ce=X!!<4s#x!1Kk^Ee*Z_^+6tCW zS}8g+{6kLS@TA%CFXg;4fe8XF7%IZ}7!PQu1HxE9gzwO?5~HL*wHi zTzVw>T`cH~7(6oxdqCeEZ+mj3BcJA1Q1q6XEb)W@wV@);lMge~s*Yea!Y7C!f}$7$ zw7>o(X3gS@^+-{#jC=UlOzvFem$qWe5ON_*L?pH(1;98LidB`a{yezTJ~y7CF`+L& znt~h+iJv+Z(V^Il*ps?9_^}-Sp=*EWs!7nkmA{yxTXa5HZQ@0^AFV%wwooaA#|3>} zyYSokVd&2(^1HSX0vN~wBLIz#0uzar570XMLGUydK<}nFy7E5dB*WP9L84DL`LmCZ{_hVW@!D%mnvhC(Wt1(Y2=RK}fmDFEcBsVAC@v(h9AW==e44J=NOkG2N6VN#}r<_ z4ltz94)>?c9$v*&AfpUSCJEbECjs>B;pul=N6Jav>pwELt8A>k%h!{uDlb*^)P?UW zpx9ZVU+5n=tvVDw0A+vmH2wPTd%KOBEIJ5B#QKK;;p4)12bnv_u+>3ak--?6@ zgP=qzX5cp$PLa}H_&S`oH%JBv9>E?wDQaW=mAe6VD6?Dys5DWKg79}$56JZrnQJ|3 zki*H0n^)v4r~Uf%{r~^~&mrRs1vqd04^Y?osnHhtNd*7ZPRp|TisnpQPx@+3O?BNL z^LvZwN+bO>)Y=F6v=3aOEpM1lMdZIdYl7t8=-Kd|mwk5czEXF8=&Jd<{N20kjDN*Y z{>n-GMs@=vPNsnp(%UBjFzQH3No z=q1o!CZzm{RnaWFr7+P01%^m34Lh5?AJG^<;{77<@QI@QF~2E#@nwgG0%3lk?+}|d zk%oNX`e&&Jy2qnBMGDMY*}8(!Z=0`qbD16_(ZZ1Ttyb$b7N{)}4Fm_FvIPeNfQkX)uOhcchMu|Ms(0G)=-#4i zM-M|Ik6-_q7$Udz#JdyEgcl)7v;xQjgf#G2PbkzWj$cLldD?cDw6-A^cof$?hYkB_ zE0HWNGMQ&gejL0XrtL;K1RAY<_bAX3`(kbjeZPz_!rk2Z4b=YqefZ_F@pMuk2rJVj zEMk|hOL2T}-ag80D;G$)pqZq?^4jk=kVJpgeu3Bs5}1M{%mo|w?(q|n?EaAc!LW1R zv1Rw9CdW`d4`fP2U&uA}ETR-zqS8TR{`8n8mjB)MH&a7XdhL1QpeN;@G&b2NQ2Nn{AfF-s-tpkoa8_) zv&v}#%z!Luqj3>-fKS`%!@z(Q67Rkpio!tR7}b<0D4+l;a79KUknUP@qp-_NSOg@c@5UZiphm_oq2wp1FJ|G{p)WU_f#UTgZWfr;w5N#eWscSt$@u4UiwD zAS`J@vjrMSTrghqsdMnxi1PCTFO#b%pNjE^UblDT`npY5H?Vt)&^8W2Og_|t!Xl=D zyCX;5$!db51`Fvhakg*WF5fsRN^-dTAar1T@~%G19@tDg3fOBDo(hHOz;mBfV?lBX zFti$2#E7_SwdX5|>j;P3Tfv}T-5B6n;o|F1NcbiC7t$JQs%pYJ zAi4%pT%?Leo9O}Y<1lr96q-HsAV?t$f`T77jtB#ak>@h`%6M!l8XzoKxVUw>4_U;C zr$3Ivv@dfiz9>V40;r%kfFl7zj%U;CLV3L3coo9EO8j)DhT&uiYO|K@s*B&)ClRV5 z_#fGSyCUeJGq*%|Tk%r88VP@Dsp*t8cbzAvsUBoQD!X9I@n{zVLAcEcDWV(lw# zAXtVX$WzM5GP(r81WWeM;Ve;J`E;KzM+piR7YdvpAUG6aWpnD7P(cq2l;srFUcF

ze})U?rt#zc6@!Tq?YIsOV#^Dk%oeeoS5=_FsWJ|6wH{<#L9qX77wYDl{C~IW)Er9+ zlgjKfs+x2a5_I8;a^KDVBm75I99BSaj8fGDZK58*jizt&{~6a%IGMCic&IayoB#C^ z3-wt+;s+C@KnIFgduRhp`{?lHj2;jmQx|1U_w|6m<&Sx$_Zu3SY;@5+Xl-nmwE`T4 zi4ICd?f9pIjqnGFAPvBmF+a_)4VVUyr3x1FcHXD9H~wF9o7eqW9nTlJcQyTsdgaL&0yW9cbN*uI-8AN!DKs1}AR{>SNlvVdow9XE% z0F*g^GZ8y*&`{l$2IMPgnHim$fJ`Lj4~Pfl113WUyzs<;Qb6FznqVv?DOJeY$2dLb z9xRC5UA`^M4*2vgk`;{tNQPZD=|y$m90ms@9gZD|jNH`FfOdeRTY%!qiraQe{^E;` znNdF01p_~Dt61TGtf&aeN*ad&F5rM9K&zTzzyO>G%F6+WrF-#tbg(RjS`=K26hNt0BHkVg-*-jX zM*I)wAUi%Oy^FlDlPj%@nn^_!1WAsyvv;~6KXm#-eKe;@o@rNToJjQ zde$Z<5&eJl?eH5yxE;HLqq>U}7G%j?;5keXC=Ej#NDW5>))yy@w%9#j2S&U5tXN{@ zuBDMnL%?jN2)4fAXi}Y)^@5sPYO1SJG1;;oC-4UR1_(r)&?U$jc(q+8+*fy*lSMib z-L(Bzz6UE@^~@oH#*z{cLaTS;1Q8o$oIN7Olgz-_VD=XY7rqUlm{M#PcLB+VIyn4t zIT@5(q|Uv!<&8Os`|aUWqD&i+;mHkM?f;j9w+=X#LCVa1nMA&fVeC?T2c!T6WxKoh z9SqAh9uN`D;|Vbb6Tmrlr`!i5t{g&)-~aIO(!OitAh!A;%5ZZC#nS%=1STxLCx~ko z;s9Ylp1-e|QFSrSltNTPF(I_=`$_%U9qt*2@oz-l`LRYUE;#?y<^cwB0PQ~y1Mz8~hpOD1AF*E; zOk?a*=(W(?Z{^CFqKriR{Y9hT{;q6)$I~|ZIR^&2ML3i->ODShg#M6BM-}iFXVW2R1a1{g_!|70lQC@Rm_!D80QfVrfzQQ%0n9)ew(WvP z3WYaXt#wg)-*JI}xKb|{yQ!^LP`zo?$po|!Mzv%yck<^H?a=~^FcwxZo~{GErU3O_ zFKjdm-@Ssu_!9dA_E}mdeE-2m0K$Cku~GC!VntNcIJDHs@Qw|V^AcdguV@Ed%fLBI z%PHi#ci|h4*Kzv%ACN%j+PL<(aI9zn%6-Ou*HgmBVz0r2B7S>U-taU%BSfM z>nk`Pfcz&?{y0nd-{>m*U(NsY*na`kX)|2a#t#HoUD5Bp7d%{U3R{Q|f5mWcT8sDQ zSCRwbamlXx6{=W?j>GJ?gYJl+IS+0VbvxFc58T%V;mXTd#k$mZA=;X>ioUdrXIx% z6gw`oXeM=)f#YMl209LgXg*=AZqhb+5mIl7;$D+=q_ZT@!V^Wx& zb;eJWrZseG=UvKjeRAG*>5*Wlcn7M*KdR0!VzUoi1>1@@I%U4G(1=8IVtaQ=FQH+$ zN;ctgWff5Ci2wqA_lbMo8Io?!9K1gmYyykD)I34p?m-`D-yfZXhs3`K2;4_kwdRi} zK=cl}7la8%T!hV?2e5vl1CQEr)=x-R!{52@gdlqBhI zy9V?^1;j_oKswD?8izv&T?%~9kW0jAgV7*!5~dwA(Kq}s4i&-4-VQYjKpaUqVpEXw z21u0>;XtI}NP3>MwxC<^8!46&)_i=|U|u1BY*5nUVW3S~-+TP(VJCKkMi!9`x=sfL zs%K6xBgV!t%RzWJD%(g7M-fF`f-7q{L?|NOuF*^)Hh96D=@}iw)s*C~=9q5Sd&gO< zm}trPOizIJ=nn&_moBAe{MfJ}9x%%2E7K2nzf6Od{0{E!>L}Q!nVW6}aB@SKe2M7o z^Bq@&b>uX^0n}ouXPkmi!W~so%i_yv0bAk-a*Y}YV>{o1?4Gv=Nc@U^czyJ>Ab--s z@(qES2_r81ze%b|a`gr`QnV4c8XGO)?QkApZ>kT}t`)=n91W9x1GNSCyRxrHU_n`V zU9Fn;cWT%lxM0Dqc`|Kfds+@xXoyK)B$Gyq2)mJBb>AEF-rwivXqR9abng0K zKTCxiX&&#O*0nNwe=kXI$SfsC(fNUOh}-xh42w@wHeJCma1w_{hQoYfQF0R?FiMD? z+AFNQfAIh}cRc)R(HSw>92|5B5X1-6OqBLC6XM6cKMR9Fsz6)>&O#Yw8D4%7GhAK| zaEb$!ap069K-@^6gBl^rkal>76{>aEd|akL?9xFg`a{>n&d8bg=XkT5K!k2KcXcS? 
zwg(OQ4g{JphO|%z&~vcOo7ukesnWsLGlm*mCN4)06|1`Ac4<5`z!*a#tB5H(<3#C znNSw-s%hT6_z+8Ab7`CguA+~@DGeQuqneZo4wOx`e;6_9B}K9eAM zx$;~plpoBsg76SAhi#H5D1 zmiTohs~Y365IUw(?E~0hI1>>W5Kx)OI0t=n@fhZf{=q$cC1NG^I}!lFUr&4v{gvLi zZZgOX%8*Tu<^k;!Ru2KwXD#r_ydH=Fk1fX0bAPQD?ASP+8pTi^+aXYC z03kb&5=+@=3y6_Y)II*HZ?KS)$ctj}-yQ+rvNEbZI&cgC1iX~zg8<~_p~CKJK^5+w z?^#``$mFS|Mi>(CV_m3dCinavzDUd~EmL?hkj$cZQFbMqqs4k(^4AcEW0Vuv;HYa+ zJbv1MtD_2BkAj2$zgJy;TTSJEX#DB!ic;%4{fr`V zXr+A)#qOZN;6dw-js4~JFY@vCLhkdiD&u)~!<8k*t=Y8hy6)`9z1IL!d+(up?~|$Q z(ofN~v6`kQp6Bqh+`S$dYl%VsO0ld2EIR#k$^qrj1BTB1xOZJL{eAoVzxH{03@RHR z8yHSp2H&(ywr#A=*qd}Cde{wmkd>*N*B-%P{^pL;hpc&yk5&JjEZYeI%QH=t82tDD zI9Qi=el=y+^s>vRh@y*43?M`9@%P>IU&3*&Od5e8A%OMrD009IL)qE#(Fc=w?dl0KH)EC59Dt$>2f=7O2Y^)q5EaA} zbXEXl1as|ef2+AI8#(*{yuiiaRpqJyuj_7(k9e6h3qvkbo&Px~lV~L7`s`Ue{Xle&iC~kU> zo&fRykfHD<`ys22tp`Q3Tvfsrke;GE0qRULcEEk{qH*%@7POLYwa5Oy zJ5P3Bz0hpE-{UljzGa|GL=>n=AOgtDL)kNj&I5#Iu9Ofi&Y1oO1svl&&WZmu%tKn; zN4yCs-~%jWu)S;s0>m`Z4hpxt5DG2y8~{j&^B&FkI7TPb9np9Au2it_OmRj9ia~?K zlx#N@u@N%u{Y`3>ngVyqetVh4nZ2xcCboXOugr(aEJ`TYrUwuu$-gEDIYyZY4+~sB z60GL1dU?g*#>t*V^(1Tl^7b$j!{Y-YK|ov_1TXHJfPc@>0X?Bb{;943!k#q2(!jx0 z5^wb|4OO4iF>J{ptH;g^e-wVr`xdhXu%rFr2kLooB1=i5c-Od{*H>lAu5wbjwTf^R`qXcJk&(M&@kM;Kv zMecWHL&w3+L6epnv@TncuS?wVo;kK%rhy z{XfrO2frnTYrj2^FvlCr(6cp&8!8!J6aroaKwzY{zb{xlh6Dwnkf12=(z$`HR5^A# z=KRcN`$HdgGg==-fya2Gfx^V}!D(`c2you~Ry?vT5Z}`l=Ip>$kxpbrS4IaYH34Et zrktn`U@R#0U5taM0|b~*9}Y-f+aOF*xcK9n{Nfk{GI@vwfH)9_B3p-`?&Ye&RP z6M*EM@Zwn&fKU=s7Du=G%V_{bfx*p#R0x@YD~(mhg%coa=H6OJ4w&V{vx72Js6e1#1&D@nvYe$Fv9F6sX}2qKvgiicy_wUZ zsPSKn^tZDPG&-IeUcW&&Xh6aav4T;BoATdk)pJ;vqYx}&hdeua zf9j+)tm!CGDC9AqIFSz|awY)@HVH472Nrv&i+pe9V-MyS1bM(FOB<5Y%|HRTPnxO{ zW;h9vk9IHyjpK%0TW$Qb{a9G5LqugpfCTl>x4b8uvNLE_+)&D)3?s<|KGxQc>D7si zFIY5EfW?1*&sj>>O_+3v(kQ&Bwq?m&kFnmvz3*rD%GY(d>M&5of+37t0}i70kAeX< z$J_#Y?5A`5(0saF_ryXW(cm`MK!3B^<9ASGj3qqW@S~BqtBmD(n40Z@JLNpK#lfApw^XQj+ zV1$iG4WR-9>J=l1B^{|p7aPJ)|4<^^|K-=h5r_zgC?pb5Av%I9@VS<94YpZ5P^&Ch zG_dwl(kdVq>nk_BPCxB86LW36l}mTmW<)615kGJr7k~gMomAaH{E!WMXY9bf;v%S+ z32`4&xLDo6?2vsNm@LHngpK+w0P%)AVT2%mMZ-K^GmkKT8aHOdYQ|c(2U8Jo+VhaV zoLhnEn3teb1Vb?>jFEzUTo0tmZBT8l-3+mVw+Aus6v6rbrZaW^D#@0`9G;F3)?`Q` zB@sS`3xGXb<<(~|^_OTea1{=5^->zus=tRqu|Zg) za5ygi0px_v7v0VJ9|{~~2S4pU>+tNRmEGOl-eG;?|3sDAD14|;HBf{i!ZEZ|@-X?K z{Y;$EEmo#xu&8iN10S4EX;!07?m%jb32U#)yd0z!EN`Mt0 z6|%yRAkvIK30uIv$s0-1cK&Dfdl|<;0aPJYzGCR z=}6#-`$%B#PS1YfL0N2Zh?n2Zhl3+G)Ep!5;R?!O!D^UxX77Ii(;ZBas*V7qVjJG? 
zqf!8HhJcafm$O6=IvG4FKr|8Vd>nc$UnJ347*zd^{3O~F-0xqAbXi+pEr^00{-`Bw6 zDLw6$BI2Ci!~KQ^R84*0>R;K7DfFI}6BoC6ndUR15@||kR#;zy&#axxb4bBr6+{p@ z7B`|YWIGMF4^;7X7X zK|ml89gP6ZryTO{W_Ss!L$HR9yHP-J@e?tH{l-cH$twNxd|zt=6-3;CAczTMtdPh5RaNV<)}!>eKq0Zb zP!yP;7&roIcd%X%1wEsJ_`U+=XLI`Y7Z49+gNU*uG-iul4gnwJ*j@wxMhX@VBBy}l zN~>>{)S>_sUu1K%WDqmJXQ$ z`;J%h5BPr%72rD6rK0*^jA5b>;%|;taYqv77hss6;$RSwuyQWFd)|&YSB?*XCOpFd z0}CO_^JCfT@A2IZ4I_ztedX#Ksb@HNd{Kjd5r2Wnfv5lW{2EeUraG>ARHM{S;L8z-%$}k90w91 ze3#u~n4V2EQK~xPW_9h0y>{L&gCNzl4xV1l^7TjCvBK>9T?|)+L~;FurR?Z&1tstf zNX(L%@_u_Sha^d#Csr>f`f9zG!x2!()EChzUds{Yv^ZJ?AaBvr@Hi~!Jv!MbN?RPU>4a{>ff>}pq?bR|YN~IOGxrmM|2C)86m@TUNcHvM;sQngTkbP6}CyHX2ki%c?`Avbzn)3gG>Xewc zGM2=1D)RoR;|7g>n(F^iWQ*Cb#&^|G2fbbDKy$)hIA_HQS4j%Lpa~h~Q z59H_$BKO4G|wH znS30S6_*FXh$5GQK)=|NMlSKp7YdFv92W36U8(0jTeb)UgQM_R8w6-kfN>xTXb!#* z9B@<~Y)R`X_4VGLPp!;PqpnSNMeyML%MrsqAJK0204AV%5-?N|3=eCC@dF5|d|Y4% z+Oc}--|!n=CCJ}fm;fJP<#mY0dV7x z&xVR5XceUED`5plAb}2Pwj@4h6oN39FNukcno}Etsg7SgP5D@Z!R?#xyo#hndM#g< z(wmsnlKoZNC&3b&JND&(P=iP>;rZJ5aIwDnMRNn zmKpX7@Sp&SglI%gu_3_BV-1wtq(hOel!`?`QAP!R!6G)Ppt z!G+x*f@LfPo)!a%^2~T1{35r9hN5;|Lqf7Z zBn{*qx3kpKVX^niN9zd9;NVaE-m>fE5)T5vMhXkzri0ezB4RNffJwo_73%M5UV+$b z3@pTB8XmXmn5Hf$7@&Svfxw1egMdn2mj4_Z%@u-GQ{{z))}DOAK~f$Ug;;n$oDB{J zoIo&v!E!6+vFzm4?xYafZYwZQ95?_Mrtx5F|7t*>ke^%u__(&E^ek9Rz>Ryku#jjn zjP8f5v2*KL?XI^MXjN@{t1yaRi!B$tpQL3Tp)4!s2RMWz6%Mvi#RJh(7?N%}k2%I*|LO@^O> zFE{MO(`HiPN~1-|-SNTLdyzeh9A#TzWfh6#LQI5!0WX#ZIR0*3=7)(kU`mDUi;61{ za`|?EX~cxxKwJgDV+4cD@g#VX^rm>K3$Y}eFZA$OBq@L2EEu5iKw^LcM@lcmj3^Zh z9D>fZHY}H@r|pjx$`m9O3#t9MSX5S9BmwbN(69znPKfV2DY|tt(g_QNE-<>AQuV5ABuZs z{J_W!!4#Z!nOH;DqSM`{;9G*Oyhs38lN!XF5(gVHLkk(2=x|VKZKxbfs>?B78ihfb ziqGFKf-y(X2LMPc>J$c@u|Z`{pwLo6hvZ}Mhq&TDCLDccw#?r_;Ddh#K~XVHMf&JC z9|Rz{W^Ji#?}LRp`yCe0Mph_NGlFFOYq!j~lci5$U-ebV?Y0V$Ae=zC85b{DrVroT z9dQvUOW>#!f?%R3c*6++z+9j*WP9Pl!!Ie%?R(9Yn5;-1#g$eoXbcgXwQ)8vt9}T< zfj8_3K*S7@6#>AMjy3b=HDHgTDm#@fD{}B~6yV^!lLLsa*18EQVV@ef05nL5Byd(( zOa~JPUKDc12MkN0QltXSbMjSFvH7MFXr=A(%Z2=4?kq_#OaUPhm^#@<3i{vkL2?s1 zi)(#;j;q;j!3-!UNTG2dhQ=2yw0R;#^&WBy#tllj$1mG$YoL*O@vfMigsBLcXh@)S zepFk7o5A17zkuY(*8ds*SzW*x2n|vg0eAqM#FPN*Ks7>wKpv{E!+%F0XIsZv=BM(@ z(*r@W+?VRn*js;QcEJn3b^zZ2h${D|^>cz-lRULyhy@@8xai4vY6}3ri~9Y)380{W zatk<6GK=)L??>>RiBI9cuL_36Z|wF2$S?)}9A^vYo`V2V=3=7cI!ei(R}86X<8 zU?l35nq;s5B@Gn|wTrlDQ`0Fe79P@p_rlHq@YjnMD|dlRE<8{)6bWQTL0CGlHbz-PfhY3687CNv zK{LF^`7afXmbq%d81Ny7-%zmN@;)ClGl&?V4o10ej4So%Y#1TWE_x!zi3hD^d<0ky zLY=)>$z=%Krgm&WM&vu?EeG~o+In^nZHTU+89v9RCGcG(B#vd+*qp0|=`j1Y@g>$@ zZ%yGJWy=UZ?SfbPZbVh*N>7)6%_Lb~G)igAXejvhZ#1xs#ObH~%6I)aXZg8@&L8Mh z`QR6UkV#L80h*C(O!H&4D^Wd%auR|pa2U|>#8p$1)d(&af-~^$w8+jE2Wy~W9S00O z-rvx46f5I><^Kjj5IF_UKqv9TJ5lhd3EP)!Tx?Vh02vLTj8NIbpap5By5fh^DvAc> z6dD+s;BX};gMlHJ_20umG8Sqw{pkKAK)(gR1i}O6&L=A80h$Uzgg^?M1B^&1AUklV zP=#6n*vT-Ji3s;9cz*5XJXn9$tmMGtHlnvC4S)jc^A`8fvhUO}fu4e-8@5#aVOtuHX0jYo|+D(l@aa2UjPF{);&BJZrNB$5$boXD0Xg_=Y>{hn0dVU+Y+yS=ued!cAuOhg6JRDuV}3otI0 zHgu6i^u!V-)M+vNi~UuWyg*=ZP#6saLrIoU1v6H6ZOd5?Y{G_&Gg0j%Xf13I%6twj|+3Q;rEZDV@W(=3RV< z3Y=L&X*+IKj5Mo%yJl=rD0&G(Ivxya7p_2GTXO1t8VDiAiyYAK?^hvn8a=KgS!_rT ziZO9V6n$BBNu?l%5?#1Y+?NaEIk4HA1o0o<^7$gnP8Z-@;ZiCSGKwpT(-SlQs=P_I zyNVnrkWm92m48*_qJI5lR}c~@!0ZF;4!{slE`am}fW%^fP{rKr&rvVKsWun_eE{$W zQV_GY-WE9*!fceZp~ze)JSYl-V7wg<3Idi{$hf`%wRpbnB;W^^^unNOk7_;gz620T zDcCkt@)u|Skgtnm9Yq`{9*#o~4{qPk@RWKKVt@|Npv50iaZXjdPZ97!P@oKE4u)Ei z-w;h#XCjX!k7M{^nc7({h4)KeL){O@PCej%(<}Bzs=xpM4}l@)90jjoyeEEs zwu1++5^?ZJwJPeL@?_~r`QqcArwto6b`HUv#mr0NOrGpak#-P$12!|(jcd*S5&SC~ z_B)Ya^AgLZ&Z-K-05pI$*g<#cNg3);G0O(baZX=4ULd%Stvo{_*Dj-Ak7r@{p z23cUs098T2mk|`DyYb4fvBEGma7f6J94SYgLdUY2`npS?upQb0X 
zXE?#4C;F_n^NmMln6NL^{5YzjsBwn7Zlm2$EifpZ1e@TM0_47XvW z@yf?fzq2I5G_K)9m0kl|xqr4nOWA6L9J~C?n$6asgZ`w9p-(JG$MoU>0Yqba{@)e( zJKvECaQ({sYLkEA_8zd0&2R@~W9hOsgCj^?=Ks*)aIAfb?L<~)x&Yi%5m`iuLCCzu z94=3j0I76LCg`+-5W8PF*|^WE5riynzQ>_O*Jp;G=Ew^3a_>LUw3zSoqJ-h zm98O~%)-K_7)eDfSH9hme6!z(MkSY(?1+v}#KZ}jhGe$R-wecNC9d-KRc)dQ!QFqi zQj|e4ok$o-Vv)Q5j2EAN(|da6n+iP?t-^&GSX{ ziIvrxlB;x{UdF_jkgziWJj_$n7jx-`V|1n51^{j#7StpQflNREe3*i;u(6NRTQ@&u z3@+qPyy+z;7bq(tdX+*_U$Jd`mSGODzJ+G^=&90puN07{ zFN_9Yq6;88#{^oclWTsq0%Qae4-7&)zv~hE!pdat78r;gCZ~^M^!o!oSrv>P*hBKI zy%>m`P=hnz>rby2><8E?{6$v}&7i#@S(NlY#L&7G3*!7|+i(HhGJETl+z|!XRR}W> zfJ1=N1!Mu;YVx~P|EPB7hF&CMkO#AwP4>d}>%Cm^v+;s5_m8TYf4K-!{L5R|ZdwERaq>Uu%SlZO0Wkp>Kr9x_0Y(6g3dPkX5Su*A z4o5(wFpyHXqi}Tk*SF5uSfFey4j?gwVjp{U<`~JC1G5!{J-+h?X8vf5DG0Vc>ir%$ zfZzZSPZ-J=xJ0AB1a^Gi-@meLcO#Gc&owwxh09{csw7$ zITYVJZjFr-Ga92AgJUKyY+Lwi#kw)J%{TMfD);?LHm8VKZ!Jdg9AiBc)5HO2QDaoM z*+lYYa-D!9AZkVe`*LZ(0rqqJmFmIZ0Q&I-0a%BF!t7DopsJNu0&GAw6b}j+0?0QC z6qhF5pf~`~iY{KDNd8Eml2Ib;9f|NH@H z0`NzB`!GrM7Mt*3mM+PAW`h5i&d{JtnK6Z2rI}?7?yOujhLrZrAhuk^-Nn9d@NMfz zNRh}@WmIjD793QwZynA<-1YH8x=%6KM9Y6bP(xruj3=uK9ujbqkURe-O}VEJ4m^Pq zvkRy+nq&X_Y{F^`$)3Mo=2!4ZTA3f4fSDsU3tP+pge@)w8p}ExyQh0vrqC%R35-;& z*o?gXKQZ}!y?$q5d-0-vges|pbT(vle_%8-Fd`m?95qx8V&|H@O`Z*B4xYZst}^j( zVX!-T*a24nT3Ji3Z(wf#1hwLPZ7^A(p_`w`I7!~aWw3v=##f+R3ji%3c>t4vWC0og zM?fn8ApjcK3<0pRfUp-J{p9~QDh!tsG=*l<0DCE@X>Hnnkw@RlwgqSd3jz1IOcx6Q zMif(ev4FA3fAs^dW*xS)9-lFQe1EdD$7K7wurA(wTaKZ=Ujl3z0Ko*HEkO_qKvTEi z(7r{TX*)DPOKZu;dXNIjL^RT}87T%t?d{x7^c8T4J|;Jhk`r_w>`SRKS8jjn}>3 zM_}!T&_@0K(Xw9w01g=uqCqgHWL&q2;N)Y~F4%+gBrg)nPb=^8#;)oys)|TNWUG_J zvaX>q*7Cc8f#N3wprjxyIvzO5xp+(9azvolh^!nn z;~D->nDBf2h01^>PsYL*{=9k-F{fS(1<*|iz#>18YZJ74{`13^@O6214_+Q{aa-5G z;Ja~W{eKp-SX(P$X8`aa&mj=OQA3U$^d9KaaRXEmspfh=1;C*a zFJAbS75Ol67p%}o=oC(k5ICR;iW3255?B)jhD9uACmItJ$Wd{xjt6H-$J(^E+ab38 zC{D-p=ViZ@aHJ+rRz~OBf)-xEoR%f?^mGU!`Uchy$mWm?vLgBklmMDY#+XPzAma&o z&93ZLepH+&;>ih=V4?^fVZ#*;7>oh!9;<%FlkdUW^o9vX`#L?Wq>wqLG^f=m93xUG zD+ZAq&UvS~vomgbwnR5}X8(D-OJXAO2FCSO@N;nw5EcCI6_?AX31CVCq%;Kbm4NaH zjn@+3>`*g192$D5kUEt_M8fjfh%JN*1dGqh3y?uV&UPM{PU<2c3Bh=5cTjyIy+|S+ zxMiyipMJU-3@6D}u@9GUC4y2QXyQf!Kwh^wh+|9WS&z$DK3BPQ{Vby~1DUb^$_cGn z&tR>l8vFRdW@!Z)jgZ3H)GtJx(^0cAKh4>l_iJJ_9=oO=(gYth_&Sck-?Vu!b zyMZ8}LMRz&;C&#%;Kn@fo2V1JGuBsd5dcXzXCPQA3=W0}Ya%LRYWsQ`PUsBsZ>vG2 z=pb<(;A)95wl;*a+{tf}Qv1qRK{e4PW@ZuSv#AGK^U&sg={+}`y zv0%_N41`hmiMGEfP(Lj{dmC6wd?J}IfMlgFflwMT^ePf5!deIR2hGr|waidtaE>f! zuH`Te*2%sK0^P3D1eiv+4QN654j1i#V78*BI7QAcp|PKfgT<3)@R&^A3-@);I4c4` zQ}TmQ95=CCx`Al`L--D`OGAAn_y-IrQByBvYDLh92E+KGL)pSoxrrDbR=)Sqs_^u? zru#oC(741Eg0YdHfHo9`1|e5*@UHJ%cKP*$+!>lP$4Ee#5mv4;NlQv8goQol!JP!H z&}i~|u~gEjcLr47GaM}vOFF5bMTAxOIfEX9@qtiDtOG40XPt3|9j#WiTF0lIr2$3{ zK}ohaFl5B9fR+G(6pjsn3qnjqOndxy8^$)7e^rjZ;#fcJV)rk*cDYZNe{*Hdi{gCZ z{VtAI=)XKiJM;Dn%X&wD{Akr0s==GwN_TG~T0#T4DQ*NKTB44FGuj6UG z-EO^V0LO#UfHDB^0{dat4&@9Y;17Ai+=!*7O15>iAuGFMT(oiwg=ze`3VgSq z)I%-dqY0S>eLm2My5HCO`!T`rWxM`?pl1+;V!kKIYCS7h2W$uE2bcl?1nBaN;Z_0J zX;$)3J*DHr&@fjLb>%m{&@lfQBo9 z^^e4Z0>l`Ro#55_ZY<6px}-O0xF`S()pV7Xpq^OoO)-W^;4&{6-%Cq*GL>9JwoTk1 zX>XN!f8WYDrS`?|j6M2Y6R#8JK|WAki=*QUCVx#?X{?A61;LPBFisN)@NZ1vrPl;+ ztGbk?NMh#vxjBVDa3uFY9FM0YQKw6|P!)_HFo<*&1fWY<@0b5@6%6=L<1B4y}Hkt!U%>T;ouo}FW9m$6;Vdp|J+*ll*Z$S1kd*kAf8vb$oMK`>Mh78Ztrp$|TnY*P3; z0qPvW$-%=o(ZjFvR#kA6aC9VxgFs3P0|UZ@(g1X7tAua482S2nC9%QB@GS*v9~Xus zbFExUrXGi3`C+1#Ft^`+l+g+%Z*tp#%;05UU4?}lz-mCID-z@2V7K)l6B|Lx#;#+V zydC?EtA(r@G%Dqz|B(EH#Qp! 
z<^NU5m4gT}KB1n^(lP*Lr8WV?r(q`!CTL4gG$tm{hFV~8F)Ygh(xzLIc0To)tMLH) z3yJO1fxuWr-#7o-EIHsfN>X=%!ju+|7w<(4GKEm>UUP0jBrh+#rQ$lN3cu-sLJSOb zNQS16ZAw^K|0kF1VhzQz+CO1a&lk=?#+g0dF$=Vc2SJ#K>n7($|A_u0tR1o!vUw8I z%$QS4|6wq;6AD*Uc6S&-sUd3ZgGi8bmr2A~3S~qV!aNrnE+&xk(*wrM|BujDfZ?{w zd+7p@1z-yTI;H!T5Mq_vk#Dm<^}njfKU7@Aa{l7F@zksWa9S%B8Nq|1Y5v{Z%}XS`%*C8CiqKu?u)kDM$V2 zy9z>=LMlDahFFPM-6a0S$r(y3#fmIlV#MJ6Mo2;QDiazPI+Sn!^b)+6VEobpg@+Ke zn!l>Hu}wB?BqA#Vh@wQP>571+Li6lus*rvXm+{$@+&h^W4kS!?kr(@a0OcD>e+r3b zmKdg32NSr(8BM%GqwxY%J>Di*;C(=P)VKQZI?K)W{L1Dc@j3pO;HF|vQGQ+RV!Dsa z(Zq3xi|Oh7RVTb!CdG2~6@&9m3V_@gd_&Up_Wh$M;!Gg@4;93?7Am;7)DU?>LxS*ZV_OdM_y zYpIv?T*zXff*?A!j^F>LQ*Yh(?;~(Mmx-M~>dq7edvH;T^BE38UQSXY?jifqFSh(~ z*{o_#LOI7peknKum8Vr^k6JhFMvMLhG65V?GfQU9ppw&)QCc`UUF3=r|1&=6IE5|@!=ep z_u41LfRKq)s{DTtaIxa*1H1kiF{L@>QT)tCmx_RgREY;go(@IZEIIf$wLg;EX`^|W zQ`G4IO&07j#e2Q+#S2VUI@S0wvoG9m!%MiLcLwi86mQ#uSwwVKG=x9>0z6&RVuuc6 zPCPLHpryOwyUXB01p}ZYKp_Arz##?T`{t91g#HjgEDR(E-u@&Mgne63cVe9z=x|`?^6I6$?Z5 zDp?s#V%(qo3sqdM`F>7bhrCcw6Om07DQ~tH)IVjiuHxa6E;yuvKoe0M4;82%WH&YT zHz?;GBWDn~nlL^;p!6IPjv+9xcJn2pcmxel5)~H`9r!B&jJgA@rW(c$B*rR$bvNL3 zL%6nQdirv78@qNV^>t2#{7p1+9;sC>+5P)zARoII&^F zOH2ogAdh{wFH!(_xyNV>780{(kDrWqhs)3mSTW!>ATb6S5E6EPjfru8fcB3dsGg;Z z=kGDrUBSo5ETe^42PDP^6E>$0u;Fv^`Y{N2fN(oC`haq0%PgThEKnRpGC1AyE50%Q zC~&L*VL+b02V4a$4^oc0ig-Vx#94Fz{+?*4u(cf#rD)*dGZEN`RjaS4LX!Cvm=c2e zg2qq;Zza=?Ag!hAga{y^Ci9*A8GQT0#5LXCz(l}A6d~UIeBTDRyg3(mIDnYM6qiZk zi)Fy_DjsldFnL&kPKNd_xrOvSO0+CXcW;ub{YbkSsG>zUr9r`b@IeS3J_Seoa9;L4 zCNreG4f18*RBrPOK_e0Rv3$2JSR4x#a=>8VwoU(^(Ix{x0nKCtLe7sI)0ZF`r4rHXeFFGa5jW5^IWL-AJ^=VPl} z`tOKYYWxUG67gO=R2+It9QdI*0EDJ|F>iqLQgdw#zdOA@y#=y}<4 z%3n(@p1i;{LlB7V;FhV6^^IZA+4FnuaP!N2cXbEHG3TMA9r;>gNnOck& zl7yy8()BpH5VQ__(_^WgsedvTF&KwF(t48;6UZ1DkNc<1Xhz?qiuF>#L#I$u#{WVX za8jmxV5m2b1wxl{g&@$su{#%|6OdD&fqFK4<+cSU%fDGBi-j0Q5A4$P2?O%0G!D6o z%FTu+Af52o09l{g2}bm0dII=1qd4@3`U-Gm7_Ush#5hFE%`b zK%#z#P|*m&ZZIDVRvSLOK=v&t7hWV`hg#U>`wk#vaMb^#9|Prwe4cL`N4q`uyL?If z*uO+fJ{2-fIbY3Ej8V4|J~80^5FKuxa#5yu@a&YL_3!2o;t8gdq0oA)y}TaUNcidu z<&58atH>mx%E~$H-D3AS?y~Qjc6^(vZ zraO6WO!&2EW{dj*>1bUGb_MX>i&ctd3S{9NYLdL&*gII}@E+B`6<|Md79AQUJgyyj zW+|$DBH|4`@K?^Y%LN146%Jb@MUBeufRiTvwJ1 z_nz00C9I?|PJa-i3H%0)Azpl-QcPk0ccxgo?|iR|7Q~do=7MJc*SZ@a9}bE8=&TdR zh%dstwIjTC%d48g{02H*C#qte4e6bq$((;AUMyM{%J)yZaV2H?Exuo97BLEQ{WVZb2zdkH5)=Fc827dO6l(Te#E%GufKmWKWDv?wm!7})zRjMcmz8G1XvK?_ z*0Sne6B_C)VK7A>V>;Zl>Ha|ACLA0I2O-7SBZn8e!FyILn(O`L zRMb#YNGcQXg2A%cN*a^?3-~e`JdgYAS1#e62?vMbV%`laUXPcs>>BgIq+N)1Bh&aX zyY^PC#Z0x&AAfs~n60ceOwQpM1hHKtSoz5TX3cP@;ImgxS$0<3dke6?=q()cE3vmi ziY^ZPs#+!a5lKT)bjdHcEh_u^t7cqHPD1ot000LBA?YLrI_3m8B=8!X)ybZK`OCyD zXYZS-B6k|6)6|x4#Qz9iLB01nRC~UFY}D+J-pxE&d8)cwXIlL@xv>{@6i)(f$4uw=m%YU6Hl!;z(ORyH^8O zrVi{IkNkYA=J>e9VN|Sd^89GH=&sjrL{Xb{0O1$vVLaEyKZyN|7IRpW4%+_rF?ymS~!#h-j6Ivdi#=%DBR4>tR zcx5Qd?oa*-E?N+ZvI)p9jk)Cs-YMFck1U0u5M<|-X= z`A_Qz@qz_L0RL78{gDdlx4aDhz8q7&Uyrlgpjfya^ueo*xZEry((&(|)9|5RS}<;H z*d1s92fKjnVBjW$V)%Lc?I4?BW7^ev(*Fp?Gbaz*v@lr{b_9;VV?X{o_bBlt_w`XK z(bgECjCSY_7*j93Kk*u;93uX(67dxIXUb*d7{7%kp{asB$jmW&3KI*^s3ayQ_$jc= zK|02A=OX#1JN2Kv6}7Jwsd2EMe2rZF1m39B{ON+1+Jx&vmJ_u=ig!A`^JC7*Bq7$XE9kn@mP{oMopFhv_WKIk4&Eq*<3tceRf*TkYP zZZgs#RLR6tC&r5PBYzm6m?|=us2a=Drl-Ot>RygKr71GQ88A;cXZO~=wdY#ic*0b2Rl~?-(_iJlyuM6br08#{Q~YGK zXPVBnbNQ6~DcSo>AdkM*!WFawAFSM5Fx}|oCJzx&+{4^|^d7*RUgmuKz=GZb*aKpW`D0YV|+<&-6Z^mmqB4 z%LwWF*PV6B_l0;XOgJSp!}B+IA~s>&r>d9u{Ew=QA3bc*9AOlU;x@MF|_5wJeJGe?=73|vR6 zSM|b#N-JRY3=Z2s7xTLg*om~3VT!^rcPrCoJ1-*X?s@q1w2ohx^=6@q^TsH6(ewXS+(Slvk zb^RX-2It2u$0dr<2vUHXO`mzK_xi^UzpU>ijgPVSA-g{6p#K=Nq9{{7e%JpibiWby z6pg+l7njaBERiaTHLL;u__Ilj^hUvGY;frv+ 
zDDUj=!SYJGb5^&$T7O9e8^*j%Sot3ojU)6OyJ#mTkfTK=w{}U3w zV{BFhJWl2C000VWA?hdvxLcHdNX!1JUR{=1IlugbniTewE&vq+%$8>VbOX->5JxvI zf9G6BQ21yu-)>EJ# zp@xlr0ODk2a2%mXEr@eGpg~U4y>JYyr||bme}j0g829TgsT`qz*cgJKs4_KVDv1;M zKYI0k;Np^;_5LLV9fv{M*Q6i^!NOlZ%fNCzLFBBT>ZH)WN25pLsn~W}@^C#O@sB`c zH=0);!8KjiK=Skkz&;oko)d=w;84QuDJ4=85#`hzfqyd~?Xip~;W9&ji8aHrrH`gb z%Hu*mOFt}}0Inyw`Faj;cKHEY#l`f8485YJfnXs}j)mZ$WYEG`52`2+L7H0z0I+ge z`qJ|lph|#7qR@B8J=>Mqw{((+6y#rTnY zG_2{Bf`6*1Jf06c(yqDTF7Hdv7k^{rZon95`^^O%prdGQ$IxRFe|%mq+>t&zPC3{d zr-;$${JjD8ML-0n07^hq1VF0qqemRv9ipZMeBTUcQ*vc0;5a}t0MH6RxM%|P0g4h2 z4Z%OIxEr(S!?B1g2;V3^NR_^4l`JX@QPHG4Rw#VNF(QkDiu$wzg7}Kn7kpQU@NQW1 zb(+u@0e~VLGr|B*sH^?`Xb1t|pd%Qz-}b5X>^KuS#x+!_CMd-l2fI3aA4otrDC_?g zUDAIp*d73-he(A1SP4x94P-_}Gfh4Iq@Xl21fxwhsIOWgY*`z9;;~-QMZNl$wI}+j z_1~BII#kpGih_N2=51gv8w$eL0O40Civgzp1sS2D%<0v4v{nEURAuzh6zQ6BfXBj! zDgXAG=Es-n$3Z!`U`8)dBi;6Sf1(`Rw(Q?H?evSngOkf|as7Fe%^Zg*>IroHrBP0* z^IP*AC0?Q&JjCD!HgB7+4*~^cPp+)1iX1E>Xo!HMQEwtpuDzJ8GvB{V zL1ASF>|NxOsmjWB5BjS05d{J;J(~O+LeApfhka%p%*21tSV*iI?5Y~8Xz(l>klB0{ z7zmWH0}TdVL@>TbA^fj{7*-R2H5t&swrK3iGY?bDw$(5aP^X8?Qh(x18ihFe`fq*j zjxux0uPfjSJYqv36a|sstyud&;IMjZ`NlT7W zZD#W@!`eXhE~|hp@KOmq+cPvc<@Swk4M{}}W1A@AV}!+|osBVruE9#%=uB*m_i5~< z?yJ|YZ?b(6SEA}j0aO7^2v!sjR84Z)GBJ^h_d(;b=WhdtvWoU=6JZj6W%Pj@&?A7s z6u9q_9mp#@tug$?+6FzMB(W1(!?h(e1?0sCHVyddDzpAfA@B*m- zVGRNtD+(smD16-SpEN&3p>!ACe>v<+5GJ=NaW;`nP~^mi=J05qTaAUc}NL{V}PG7)^9kPa@o zEhm;Yy*zP*2{AmxNAR1+mN_pkVcs1LczVA40bM-Si^>ok&FZ{Q?6bv5-=CLoe6W7& z6YOkY97syP&`c%q@O~OgRn_xS5uv4V*obKB1v!$l12L$h$95pN=jAKtOHbS0#7l`^ zqA3_EnmD{<=%DZI<>>HU0#lO;cfhdXf2c!#rwE+9;sQ+FHB|a-k-n_2gcGRui5L?U z(sSFQqHNFS3%L)0(2lbF7D{##^BI}}7MtShz#Lr6(f~0rOp^FQ%!m2o^LP2Jn^OK* zRt%oWcQQBw4CBJh{{Au_>Eq3o=sFw+z!{Nn;7n5pF!Un%=efkb0#rE_-u=r+jui&W z|ELt7#&5UFtDf|w6{x2!;FGila2nxcI9HqFE6220SU`*?nn5C@Upg$aa2i}Ki z0h9eDQgZjXCWVM+0G$nTKVF4LUM!3J^#n%>C{78sp|z3U19#H5oq(MHgfE&W9u)It zb!Z3lA#&9Xq~apsGzsG(Yog{}{9HJ&aV(-Y?*~@c6hM^YSToS!qZ!sNrlgW`Wa$FX zYF~iB>O*hYO`Mnr#|Kf5_q5mr;yRLN?~r7^qJqJVWa*nS1u$Ed{b+TR7(lQSgpGd` zVuO`&u#BOsBq(_fpmCUE9bLQ*^qzDrY#)LEm?#MMnE-gZNI7u9?}u-$go9v02nHxP zP3uS=mC1`OGq0lTCP?35)A!dokKVfPf9k8{za0c%SaFC~6Isz_gghohR~jCkTBz_= z4;hfth$v7az|!!Av(OLXU!O=jB$z4!Dn43`GiXkYYKX z3#<8jH4FD)J$Y{h;FuYK93&D8LXb;$ih~&uDl7*+)CHg9Og@>F^PE(Wyc4ih5F^s- z$SE~3RhBDwy{Fh^kP(b;tyD!O%%q}hg9N-9@}*H_Y^^V%Ji-zBj|#>fO#65aFXe&7 z{I)xiUl>OJLqlTN_8OuHD}LYar@pjbvQB5{7JB#7RRsNmA>+jtkC&YZ6~3--{ysok z#5gO<+y5%;nk6EzkAK`(>Ect4n|_>3Sg8y~sCAS-|CnE5zJC3pm}fl*>pibQq+%u` z<)#0jDvP&*V2^}`5MU?DVgY@oW}0I+csH_9q2fNWlh}U?D&j^DBWTwUCATZj&z)$EjOZs_r6iDe>1Vh0Y0l|4Wkc;`Y#SdoZ2na6y zt@2<;Ye~N=X@VfACSw^`Sa^6MFeW@$Lp}#px2w;<#m1G`ma_-j$K`hbXKTc8O&Ec| z7iFKU!B$Cd$rl@=O>+j#ckh-h@01gl#>nsJhn5{;CQD6SzxQPD)_Q(fg zfb}mXY+PeEBzUEN>aHo&Q{x@HzJ4dqsTgE&MFu=>Z+xrGzgXB}Nn2|nkPFx>YZoz} z*x(QjUPA1*WvHirjH=e$kbDp-t#EKUL*D>|-F)8KNck(anhJP`D z>0rS}Aa@z>C*X3b(aOVnV6tqJ}<<_!f5%ip)pPO-ILwf_h=RdAEk zy}y{uR5+=riJVz~6b20il;8jm41mxS1X_x0d6!EK$G{|D5fh*f2n-4ljd>^Kr1wHgy*nfz|*^G=;t?;v|cb)o<HLe zZ&SqkpXptzD$C@@y3`sse`aE1&pvty_6tvRSb zJHHVfRIHPL!>Mkdc^4$J2}O4D=W)9?Og4|i!YhqXi$OkcRsLPVQyZ_L^Vng}F1#YURqR{FW(@E3BmPwVK)xBSQefx4{4 z%n+p6=cN{0iZq3M!+zz%?X{N@5c{>dxSk8l+MF>|B@B&sz78wlzi_LQpnF08#W4&a z{ebDd!{fq=$Nkxg^9aDis4P^ww_yWT3dymG?aOEPV?utv7FM6mGLN}FRJlM(A*C3G zp{PW0do3o)%iv66U1ZIlmuL9|a6BTeIB=0I*gs(YGnkctBJ;&`65t?Q?#(5oup$f# z1_C$5`_^e#3c#zBEr|NwY=4DqX9(yWE-2^r&=p`r2?Y;6dA1egIaRZJF{f&EH~!%2 znf=ZjFT7TLo}9U$0e~38$QPH-mJ8Xi165CtB*E8T-k#mfe!RIZMRN&^OMkSFh;tR* z`@QCTAP@Y~-7T8!?|ZksM}i%xV675^Q~0-H=%Y1gT$A>?A(wotmVT7&muJGtzX1VN zPqHQBrw zgUgUb;OfG!cn>yQPrXNyy9MJOqs4FR_wC9gB7KXY^e-V^&s1KveiTHt@S+h*@36?Z 
zO9%|V$W+%N_T{qQduBEjSC@6tIth#jWFX@3OdbQ4);A!P*~jDqB6^2EQS)iL6@F2byoYLIa<-N-a^8v274zUdsp=}2Ls1QA{F zRPe2@|Munb0Wnt`F&$%uRmys41E30LSCQyjbr>eapa_ zl!e*mUAX~iLKiBairJxUcy)uh27@1HUiFv3X#p~}#v|g`yAa~8H-DknHV?2;$Uy6l zx8P0v8?MtGZ{>pW;sfg+XauAwl_NZ0KEl_7>yE8S_dv17(QXZtXJ@Nsi&+@ znb)@RM;+269nVk98=3xss}Azhjxssx`g{)|@KbktzWrY|T7;OcfMiQ?SR6^F2MN3X z5cljnD<<2{X>#x50WXOa!S|_4-zI&tu_04jBzxaq5dGY6j#Mh|yY=Sw4?MktqW%S8 zUO^c3-faX>EGigT#FoVbcrhzD&?-k|-?Kla2*Fq)2ztL_^?`siUI1`oPqg>(!k3^5 zh;bktVUQh6VMZZVn`>(5oE$B=A`M?uXm7}m#bw(=3LIj<87K_VLAn}})Q-O{HphU@ zt@uvCMz&k@9bdLHp?uE52@S{b#TVr|Puq?#qDD`LU)<PdszM){Ks2y2yO z!ecW(&WvG1VZb!Z?tXN#73Q=WP+@Sj@=z&@dn9U@en_4J0bM)*;X}NFt+3Fm=J)wv zcP}p*(TxxRRf%Da<;)|m8vjKTZ!2z)gdoJGL_&vQPJRaNpvIQ#{ppAL%H0VRBAC4o(&kkE`XAgBB4G?%kuzLoF6nw6rz)KX~2nXYZ zj~9&@CUXXv;h@V8Gk{$9GI@6g#s}FPp^%s9=-=p*5@a2jEbsAv-J+jz&{%5e72#`%op2h`-i0FBxX>d% z^J)2AK*N=Kr4Tys1er*d5G4<{dmFI+gtv?)YqPQ5kU*LO-M)b0>B8k0I5N^p`Yr!10mM3Z7E@Tf7rNGCMxy|Nrp7XWqJ;8 zKfL=DrKmQ)Xa`%F04jKf8&Ccl>cy0@gv}Ta{FWo4=VMmv8}}4eEsj+M4M)yCMX)Yq zcCxj|sw|D-8g(D#{R2S%bPxBJR|Oz=4p0_@NPmNYKb4iLtiE#xs-ky=BLp`m{t15$ zw&(1(P;dZ2phO@kizxOg1lehK>?tsXr;x!nl9g_hgTx@}ugvO?TI`lmw?WHP3}k}^ z4updt^eYgXQl+IN4DK|xft9iqM~Pj)9+(S|p+t)h6w<*cKqY&#{z~?7X3Kb41)zlj zcqfPx8WaA62ir+L{Q{7yigz9Niau!z!I&Tx5m!{Q9JH7xcj6fVwun@P5R>&K3v)l# zTE#-a!0;d!7a?Py@ER;{9~5Xm$@7quwVXOC6@n1s4hSlL1F0anPav{bw*O@=25>bm z#Xn-b>!!ErLv^JH2z+bYy{2YS2~eZg z%B?{7&C#X3P*sb>azCNd4b0&mqXK^~w{K?5{u9go1ioYzDp|84tP@3uUc1YLY?-KK=LduDEL1roiBf;{Ey6aO4j2!O!L@}u0VL^12on~M-sKz+~$<3!J< z4ysoo36!uLP=55KW6t-nUqYc>CMNBbN1^?hOoz0L%1klIr*aUDHTo8+gbK=$gd3gc z{LYr#s3H~#hzo=wzFzPUF%|iSg{+<#(u{WPU(EaUx5F#9%)@v2U1s zUH{N}ma^#&V6HeiaKHyd3TF-mJQhj`0%ctJ6Q}~+`^znYz)jcre7j+ihGrv%S)oOk zy?UIJdbF+S(fM@L5E$lgv1ACm#Ie1757;vZAi#|24ro!$=zL{@Lyy!QfBXiPkZa}e zc;UA3pNT>i1GMDlHJcGt_FRWgkJG3ImC7Os@Ws6 znKd`{5%T#iZX!OWm!kCgf4}utGpW4Wn?4gaK*~zNOvA?^Y)HvUbHM?`u#vqpT-58gQ)c@Eq6VcGXBqL$7(bv* z!V@I)dP#kY|c`U4m3EFwWg5}6GxC}NpZaW9#^$7NKa!&zD^#qNoXtFtfVAx zmV2-eUuEF?I0Qq8@g5`1|BvBb8X0fA-;AWS;{GRq>h%|Z@@4iqSk$`Lf>6OBjX?HZ z6o;S3$C4mjexU!$&;^zTunnLM_zi#!0MrOEil;1T#vu6^8J`}3kHK>DI@9gg27C-b zAt71z7q1t--}?xP9u5NZl5t0C0EgTDT`(tu!%#0k-EdbZ#2hbRsQq&m8MZ8g_muOY zFmLab`VtEh%gUaW#mC`!ux!4*{Wyou*ZQhvo0hEH2}8Cam{C33@~#963q?*ectMW0 z6co$FC`Mm=vF&+(ZujMU2xJ1hA!TS1$J#sCI@a6*9@n`dp`QC{=w(pl7A>(9X&^Z* zAAxCRAIssx7;)W4FJYS24KkZNN-_^2d`S)z^+0ryMS-Hl@#Z*-$RJikT?L%ZG+T(+ha$~&RDNMFta#c~xDNG41KA@id3Lc} zSH81G0AK246<(b>+TaC217IWK>Nv&#a{zGm+i1lUgTHu(;5yPzw`bp|P3TnaW&bdZ z{!yqGxlNPlHiT|MOeKsoKy;Wsi0Vo+K-otU@-2hP{sdweeHc)eCxEhCX5~v=#nZw= z2{RNI5Hl~h5E;ZCGhZnb4?xE-1~gWps%S(4>LOp|3wN`5Ics6Gf&!pC%)8|zAbd#o z71??sxZqqAWr~hfttYnb6dN4;W%0}`j29G#kx0ePbnt!WxA+nB8GSr@50{J00oPT| zxRup^N@WrF@BpNN#IP_kb^IUZ{kW~)FhG7+!a!VMTvtrJziOC~FJb!{%0p86BhWqQ zr9Fc4YwQov(SA(PqruA;&@>Dkz61D1gmuubEtHBE%Bf&j31K0iNJ2;vVFC>pFAXZS zh1F(@!cb*UIsmeD<3*Ib+JaDGayrukDyl|ORP_!+I#o;G9 zk4swXu4F|1gkQ>7+du7yY#j%%e=B5oih|&`1uV-N!TGf>)-3miY4PouqIV>NreP9AJGW zjCzBB$VxHt;l_2%h9rWU2Eo8+8s`nYg5N@Ha)FDJeWB5iNx$uq0Nw*3LNo_Zc!SS1 zBNcWk-1qh>^1;No5klk3ud0jI!`*8>l-vVWlw=4gya%?r1J;vBJ<*Q~3_Jh*>+HXT zY*sCb#KrE>LL&ArIj8)qUjB&^X1L)0J%2|xfWYg+ZB6f;YVEdF)_ zDSA=Lm*SMmXRO-&zBpW70~VGnOVSpX_p}N@Xi0=Jdcco#&zDpO)Hx@Sm%#3E1Ho{J zfRC^ruX-zTrcOt-;r)fe$3xKou;+wIU^rR9*1b-O249jlmEU8KX=9}eaSIgV=qzQ= z?3>}h;_kntmvQj_6Htj_zFl|l$G<@_#e$;)OJrX9V;g>BtsSFd@u5-25W+7w*W3`9 z%_%I2p1~wG5heb4D!kT!4);pPh(`m^(;)kgRlw?bJz}w3I+h{rRBPdEJs+jtI{N^q$xH^X zV^7+8Yd`%HgOTu7;D%%+=Y04vmT%N7R`Dj_J(}zt#l6D1NDoI^5G_{0JXk~~eN(_3 z0`20-|M6KpJAA$j_%PVSnYZ$?4l4v$4ZB(t8`CEw!lsYDVRf*`N%NepNwK`bS(wm_@APTx7bMYBAy5q81(d{Tt^vc1N8pe;rRJ$T&Bvr 
zQojxyb^_Tp`=p4iSBk~K<^l4zs)ni=D-oeAL2^)Zp{tox>sKY5;W;32c48c(f%KTRW z$1=mfaJma5#u&>W4Mq4p4OVa8IC{iAx#0k?C+j`l zoJd8ea<$|U_)-wfR>zVmXfJNM4-XghvNH{4U`vn&A{o;aNIogR8%iaf9a0X?uYlKe z)&7@Emu$&JAy{)pT*wKY2QRXRbxXkMx>;oY+jA5EKV+d$wGvmqf0bm*!$ znY!|*K4Mv-`v-%$7Jr002Q3F?fur+E-uC<_faQuQ>H~?860Th<@jP52VdUS#faVFJ zRL_If<+fqw)cNQ>aRwM1Du!4fL%Uei30nZ2-q_!L-(0P}s1XFa2aGf}PPhAk-=}Z=Im|)c^za z26c}T7d?IbjbOEegA&~AbmxXdA3#wI2Z1TP1|Dv;FM}>{o?HCDKq15?)5M@eF8IJL zl#W1bwV3HCz~sp@=9@Ct?MoG{LA&!{aW7#xh_GV8=pU765;zDzM!`UII8%j&K-cwE<+A>u)(W%iF)GjseK=)VjE;$A2r)(llJ9+<$5b@Y4P^42hDgQTDeG8ci3iNNX2 zSBWoD<5^#md43pdGI1vfNisxoLP6^^?AgR}r-V`9o)Vg9fe+@CH=8Q)<>U2=`E-YZ zVc?6+JR|kyz(1?QsIcpqQo0T^@k9gHj}_qR$kt%vC)GaagTc_@C&B(C{QjiFvgNi;Jl;D#DRs6?@)~0ysDYf>wCJt>@q0bAmZ~1B<2;9d33! za-W!Oi(K1(Tik@&XUH0P&EVq9*`iL0d~s04LYL2JWGyBxmX|?5`^Ry?CiWG z675|B>=+FLqY;!D(e5ry2g`IXg1CW!)DRjGTpU;0G-4fh*ZQ6m^nLn!R!=b_3Ov&aU?ST*z&9Fuk`!%zyJUV{2}c$1~{0FP5n*pBu5b+ z#ePpEYQSRuf9kFy^7ZEPZX?!FK7YZvd6?$h5ByKaulZGCdVwe~E8l`r>8rSqAOsmN z;6MmUB3g*|7pby!RE)zxb9xUB2SGB?b=F@5L4Zggr4mq8F|ZM=vAyq(8km?j+~sm5 z361Gy;&|tVqF!X!@1G>x{v+Vwe^Oem0#m{J7cM#qu2rB1L{rPvV0CT_- zC`=&D9y?cp$s}fg1@2;{`%J!{BdzcrOuVEYF9H!w#TcWR1S;wqFdKmqXxP5IJ_bc6 zn}w9GW$lESU=5@OFe_yIPZmSP|MBKXK$7hcNCT1qlfiIoq}MOXQb-dZ^2`2T0LPZj z+b&&LH9|ro;ZWEr2y__)2B5Nz%oLMj(_d$3aePPZ?fdsfeEol_sad;%zyLrMNnZ!> zhXOSAK%!n~{W(jvfq;Pp(fWaf?GzdzH!CsecN zN~RZk*;~8hOb?#Z7#RNSq)Z%Y)NL#qy;*rI!`<+JKqN%V3(IjWimI15g{`vygnU5& z&Ct?`ktzTh$QwiGw#%_W8AT2+5+5MUJa20Ul?LB7x)+kMqHE zh`%y$q6F8a8>ZG5vBZcTmt_v@QF+Whs-Iath*F_~?EXtRBa-JO@~45hg92UZAeo6H z;~twBLA+RKV1RziY^f5OO12ysDdZLA=tCNO{6?ta$`%W0m)sI~IvfcCxBE0j{0lEL zNy30DcUA9Mf+D0WLK2}Pn^-t~fO|*Kdn|5{tQ~o$;J7c)WLfvB*I}?nTC3*7J{qct z55OdNa{;`2RIyQ-mQim8_Jo-f>Q_y@CI|Ed;bHm(0~?ax9Hz>pQhIzLug?}Y9K8YP z7_fMbTls#$jcvK$~es>TRHWZ0JiujHAlll)hr;01!Ko5eO`ofR3_FzmcdzX-C z;Nqj%@7;2~tL(nnPV9e_{HxN}UQ+b+#mF4;uBK`eE7d?NcR5> zQ#@iW7)fVd7>eF2zcYIOz<`Gk{?5jX9I0gdU-&`m>#ik4oQhC-^a(;iaA*G0+D<#*fjXGbGA`RQWpi*fH0Act;rL z`(}ioi~L*91lE57i))XVP3-~p6PQ~6FN2cycH)qrVF)OcN~KRT623eG7l#ocs6o6Z z2Kd=Tqk{AgAs~nq=fsLC0Wi&+ZSp~{-}uJwe)xCu>wDpN&i*bdcZTOEIxNfRC>IDs z2+_d$U(Nm#3;~5i;jz)}wG*1Q8y0aQ`uoeRqmckL&|277MZxdjQuN_3prXx6D?d(~ z(=LbWb@)Q*ac41m;C4py(;F{IDQgnKL%$$_KfvIktp|t<5aBne>rfml>EX$LLxUhq z0YDS?fWweU&j$7T1-7K!MHM*{}PJSy#jbIjI)(x%{er-tSa5&MMdZqp zn+PI;-dCmK!{1u$#dQG&!DT`!Ig z@N6J;e(t_u3JON0Ff$>d99>M2Gl77kN89xn84=vRM&OVEGn`;S9bQI$n{Bq=wbS-T zzrQq}F8&9XxRQa^>5W@V`Y0dG{sqJn&_79VYZuH&#ISZG4#cH(eAFZtEOBG``K;1| zp3=Sym_*PrGGCfN;y>R7vBE7;f7$!q9`Liu_8d4-W?+&?@je8Vu=ZWh9_X?M+em%1 z+F%4K07C;y1JY1o9G$q?%eRLp>Mzm~abk|cAOi{ccsfum=Q!4&s=8)$O=`JZDKLET zPc=smqYwRbDD=Qyo>An zx(?K^0_6;$gas?QJ03c^(LE8M-`}%e_aV{$r6gQ_ysOi{k`eLNxr>jNMg0F-Xj)i3ly8QlC);L)okigMbS>$9^zc7SRd? 
zFu2Ai9FF@%6pl}l`HQNqwOMMA__jVs(~B99APUGr3nd>g;0S>^6vD*9s;O&6b*koN zca-8nKw!~=G181ANoax6=yeCT#)6otpDWNL0ZeAlmOTZ6K`|7Km?n8XYttpfJGXK% zJ4+(ZDfzKa`M9m}#7EfAl#@TQU^ow!=v+*pF;GDuJ|7$~h~_1zEkP9dqgEtQ0Dsg`^YQODx%kl zir@tz0VpbFd02o@$jxup@n;pF_k(VlKl6L2D}*RNJuz6V+(~)>BRCg41Y{F42v}T9 z=f#O{-tz8QV^(QJAJGb6yQXD?4)FXy`~IGkuE&@WB_%T7M^#LU_sT&Fe;!iL91#-p z`(K1gC7b^dI-3L-o@tANVWPd6&}>^}1VItCT}DJ-GA+KlU!d?z zK?ai!;tr59uNOmquFKKLni^qx@BAHX^2_zKE{z*9k})F^fZ#d5$NXH(ba=QI&1t(9 z#lY(~giSd#^r7zvq(WZt%|H&Y5eefg9n8LGcF1j)F1Qf}1xCmOQKH*Ws>mK^A08HO z{C}T8X8*_R5OLU!T|sbMjgNvMXBta=F9d9Npt$er-|$ip%hk6iDMdJ@ zruX8g2rne`J-VkZ{t7~%y#w+KW*~wG)#;(hRG;D4uZsE?7-bHd3w;}eeWTkwgA8T9 zl)t8`i&!*)C_DgJKm{lPc7RLA0?r^E)w5#HeFyPu92|x!&77xh_p^`|0s>Wg6bL|6 zB2tIMejx~h>qn#hB1`165$N!AJVk?G^Aa>fyZ@><1x&i5fddfMY4cg$_gvO8epL zCBK635N6aOa6FDQ``^B>uq#-=cTnNaNT$93;|LbkLA=p*d1nm)?Rk1+eI(u+^mGN2 zf84bx10n;smU{jol&jn#JSGqjEJ%T1HZgZUSXX-YcDxfc{rf>ykSFuQkq+zMY2)x% zy0cfsAK#`!QTw$2seX|dp#pKQD)v9IuVL7daOryi!~ zbxo6crLD?R4=@)07>(#2SiUS6uz5)41z}Fc)eRv8V+M*_7^a{R>P5^cOPM?Q?g0OHF;}zFok9 z0uD+7I3pTP(BudFsRo#2B*5r0#emhDhtdQFMyOy0swDqKMT6y0h{f&LMeZzqg+&V# zBZHU3Xh%y1*33TC8H6+`=pVct*uO#!!I+hegJ8%>gs8Dk6)__cjsvf2QoDzNVg;)- z>Ih&sV&w;}TP>dE)O_Cb8!${X5#QVmk0~2F==bcQ14o2?A7g*jCGV8`0dt?|FurZ6 zyv%IDLno?$QVEXT+93D%-b}$@F&%R<&uxx%1DiQ_=JfE1_JhUFc zfL{AW%_b(iP{5yS(bhC*Abs#Qcf+o+UETez88}0`ip6f}fy5+bP%(U|kcH4CLHnq-Y+PCIe7(hn z#GwNWQv`cMu@iS{lr^)}ObNe-`UGVP!CiSoiG7)4aEmS5H`0IXKNl?Id3(e#3*c0D z8kv}$I$RC^Hf!`10AjJi3>eA#{(wO(Ab<#sZ|}EHZegL;-d1kP>67>IoOq#u2)$@P z%A$eB{_j@2SziqVj$oijrN)y1zrWe5UEi~g3e!mqb`YNLJVdPkNp`<VMRc!!|5Cm(T?2_? z4VsooiJ7h%Jgh!g8kQuvJ2B{d9O`U-nSnW64EUO!>|FpzxT)y<( zGQH(GKlPGWU9W%Erla;qQ3Q83+msL)OhBN!Tj9qZTl~Fivf!V_#M}(_)PvjVZ~ZHfV&C6y<>Bi5!YC3=JTbx` ziK2ekBcbmHfDBx>2UJ`k2+4b;UcST7up=Ol65W0L_AhN!(iYS0?8Pv#gO?_HpsMTR zckEUojG29MhGxwou!#!kkDC0z%=!!{`iz zAp9n~=)r?689RRmZv4@n?(X;CV%CffU;HO@27&%<^jq|Q$RM8pj1T@-Tj)9`9)8y@ z4$$$>F1Rm(?h`mDcpdgnBT&`h+{;#uu`tRS*!(2c=g>jclLW)O2YXHaZ?hM)#eyA4 zkxfelK6=?ZiZvB+MVmjs^!;~X9XXj|2X#Nb*`I&xq|SrLhy3s$U_Gb%U zHAu`voH(b;(=ydS?ldqw1Tcb$Ak->w!2Ax{XDsn(R{Os$m|jUG8|8QcYjMOau3dUA{@3I<|^K1xafbV$T4DxBsK6wkVNZ z`adUu-ctMt0e1KBpN}JPUoPRE#fk(VxE_s)H=(90C%DoWJE(#0zxB&cg~Eeb0cpc9 zn9JXQR6??f>qZ^iHZMuYr2oM2vfX2lLE|1t_>#vhC}q*$&QM3)KzbChjQ@BKQX4|?+Kus&^zj(Ko5x~q0%8JWo&&PrMHEgi!R&t@G2{?i z7hJ#l@R>7Iw6 zvf5bK)z*j$JP@}3R4_Yjya@OD>*G=P%i{tX)~+v}r@bq!PQT!oB7BPTt)NI6-l8%7 zQg7%qrL3G_Y;3JS(~GW??+0$5kLv;WmzjI!Q7=G=$+*DV1mPySVXxwv*0sFFeq)Gn z;eM+NFNyoPZu0-Iq0Yxuip7BqmJ=IZT`)>O{QTDvKb857_XN#~GO3|x^0WW=_(sI4 znU>3EN+ua&AYqoXQ0xM@YuhC-TthPE&R_;F+;m? 
z)E#D!SrR-9{SrtOO%=I#li0eYYnV?EFTyM$=ZVr=M>ptBb|uYw9xy?W<<=`KsDxkl z<@7a5(nQ+Hz9(Q3HvJaoKj5tU^R_}xhR6=q4*}N~k-x+f0bq%?N1VTpDUMYPQ)P%> zaMZ&P(ik7%i|S+!sJ&|P323`be~4gA_HJnCJ~ufaI_ADq%%Zxb6_=KKp$?#} z%h7n~Oy8Z^O{AvonyQ$O4b3C0!*r)ODHE3k877);iTlr^ci)9QxJctG19y%1%a?D> zg>=wPFgqCy9J03VS>mdWIuNCLZa{bo_8cd_!6$safs%z&8G^uOhG5-?e9BtO6TY=r znU)6V?cjGtMbe?g+TiFC^b5QIcduW5{>=Xwf8HTbD{;chYEh+Snn8g4BX_5F>9+vq<8$CW9AJQ#ChYX}>pA72{3N~epubhD_hi24mFn8k<$5+0D_yCapUhVUic{RiOc&mQu?pQOUB&#_5ka< zhyUo5tX-}o8o{(K;CA09qb|t9HE`a_D_=JZqoQ1BheHj|_PRI(EBO0bwbo5`JM0lj z=B!7(AF6mGMDkEi`6kK2f?Kxf?bPS~Tp^r2_S>LBDxGYi@Ojg;Q8$1}|X_kPjN64xOS34`OB)kkH>dnP7~5vdbBrdHbw!mWSf z3^YcOt6JBphZ$#2i|qdzT<;x?DYUjaU`*fU_3R%7&?s>VPFsx<6`nuFcDn2BH9WI% zyOcw4NWxOPx7{~|jMi{5u|9idQbvpqJhW8U#gyaAU=k|5H`n4Fxj8h81~8|kW{~JO z7cD=de~9`Qc$IiSfG25d(r2bHh=#DV(OVLsQ!+U2Mfg$SW}7ZO4LR6%vCO}|H+NUv zjqaPV!mZBN{~6Ae)?_B#k~-D;$gl3LeiSzkgW}-)gY0*`{int}ZIejZ*!nBGZt~Lo znl~67z!ufe`v3G+qdrMbk}}uS$eaFo>xBv2Ab!9x)whbjo49i`^7vqHf^#!G^qC&* z-!}mtF_=cCEws&T7;R#X6fND#7zhHszrFAS*HiNXDbWS{5HpR|l-)C`etB21i|>kn zv&miX5bB`+_7}QI{Vbcjw7+~Mjq%)oT)avM<^{cOl*&>Yy)6Ac+ule~CmPaYe)p)i zWyNhc?)aAl7_b>OHqteS$)4@Y#R zKnIz71MfiKWr^GY&e6j2`aPK=iNz^CgYZa&%TIK?2t2*?0lNWYFj)nf56J`aK{2PM zLi$uJTSGPPj>cEg*OzV3tZZ8eiwJaO$1$)z2C09GG8fP>0fCd=|z|CBeCRvXrI(@&R22w#HeK9kbz#ugZx22$#A!z`-|x% zFOcPE!9{-e->A6zLQD}jE%nz~eyoFx8Y_q?A`h2u2_Rv@aj|fL?>Na^FI3O*zgFQ6 zTlfA^FGaQTzF3}LFP;*As^CD1^LV^oVRCT`^R4O!a#DJS$xE-J5IGZb`c6L6A^_D6 z=lkORUnt=Q5(~(}TlMb)!2+Lm3EBj(H80#(uhwDz{=!ur{KOx`X=xKb70SH|gx_2j zEURFiAbFT9F3`aU%^<(UAO3s(^6_2f;-i2ddln}Z({R~a>BVyRTpR#5RulvPpdE!2 zKy-6ikLVunxnk5v?qBHQ=Lv-oquBkNz*h4a35I6t<>Y-bcK5;wihJh_S!L)#a~lwa3wpz6-Gp#hQ*VY+#8 zquyC|U5kVW5fk6DhylStLBLwBEJ&XFzn3-c5+?iV{rPqJqO7-iD3uZ*c|{73RZ%0H zB8ptAQe~!w!y$w*SwOQv0EmZkW{d(vXR*g<#}EAw_QOKL=M0ccwB_$6%OeyixUKtq z2AHvTYX8M-S`uhp-HXER3=+Bj>tC7yBoEFJBDKt-F7do@jamm)67pcKDpO{VU$lQ6 z*y!j<&x_WyK>(Ccit=H9k7JOgE=-clUu)@#-1@!_3x38B^_rt^z8I!=`Vw|L#JU!d zQ{9VmIX!X{uYUV_`mDpw2k_&s!JchHdl*b1Ypk!J%o=$33>W%yGB87HS7wDl-FkDD z3;73A7w~y;(qTTl71toLs0)vOA8rUAXl-LzMb+-s&kGa)&O#wUq7W|f&l=|k#7IZv z8DP#q-a+^-w9du!CFNZkSJ1NgpxI!f&H^mHvo~nYQkz9rZ{HC6Se?a1d%N=gZZzQ> zaS>SF+zf1&M!ZOQjYIq=u)fE?Pst^IhAgS}1cU?u%)(_HFkXT4VDu?<0>$ML{t1Jw zd4jJYNw`^B!W%-0Sf}DF9OaFbTtLGHR9#kIrMDPvfyK>|PZaF*yx774yn1bo)l9}W zWj2}_v=jgZWDX?!;Ap4Pz8WdZW4J`T?fX5QO)~9hL0*9KLoESttt7d4P-+%QN;NXq zjzoYXQvuTlI+#RUmY$a@;zd>|L;|*1;ieYso4Rd{UgaMT1J#WvzW7Jyr%qo2KnF%a zC6GvnH&Hm(!Z+v2R{8J$qO7%{Gl2v^2o?u<6Te{8KxANmfUKEJ^2p_5f*#EW<4O_j z&9~Cw@h@7gT!4fTp$h*#Xrd#7PbUmjLGR!i=b+<8rVxDx7#zb>{ZVop1QZqQY7)O| z6bY=xU_d*a$$gKbVBnLscz=uQq5efr!ZMd>saFcJARo%`R7&CYrRVZy&OrD;uXbVe zu7I=$GQuAEtN7&zARR&_<*pv?9fv|y`{`_i-{Ay@V)QE=kH2#NNw1sIvqVb!)gEWg zLroKyLrQ*@AhF(dKF+Xx=HMwmj}MRf%Xkq3ph}gbA|Q4KD%m?tNbc2D*|?9CPfN&J z{u+AM*?i{>VB%j#pAD-I#lB^_A$+7be;TXnqwfIL-?;AoPFHgwENpW~ED6P!S+S}a zAz+c;q0F`*$LXnS9@W4A@yWn-O-I_LG8RDXLqvOS7L_ER{J;E)ze0X!y) zMRd8g20?&<1oZG!h3SJ?eP|E@AQfSuK8B?T4FO8{du3o05D-K{tjC*Gh@*!UgTyeN zJ?--O+AE2bDY9nGthzWq6gA!bK!ju@x#}#mz8L^v7sjX@A9I*Kl0R8c@!2+;9t?z8 zm+&6l+`e{mmDrIytJED?-eIN_8JJ}NwHRPCNzRJOXYcQ>Y|Nn)c&Mz(Nw~$B*V%kT z7y#N#(V3sDQB@f5OKOt(Y~H=~%2ZXKX5|p{AFEuaFx~o_(%z7wvcxDM zySFR}7&{?p*j1{sUkidl$3ndfE+|hds+L`UAQkb}GSj^c2Z|AbdcSgyEdQb5@t~{O zNnX6VYplOtE6m38C)m=b^7`*i4}_fO;SSzc$RL%qxpOP0;D;G!DtEqsYUyL z@_BZ=1ymk_oIJrmTeeW34~t$WDx&Hl<%t@3LgDDT8d9G-1tZj=8B4?u*U8em?2JAb z`Vy<dx=G@S1seOrD&?SnlKfNxKzAgB%86qLh1D{9F@;0t&ysljW)e z@VW$GNCW^)1RzAAP0LAJfW~p`YqZRL)3GnK5{_b_*LnT?beImNlO-CGyMU($f!KZk z-N4Csm0=gD?0Zu~q2P~95MjM+V7Hq(>_~U@CC%IG&%gix44)zKL;Q3cYP_ z@nz>MpNT^2&_95kD7moM&7_Gz#)guBd&Hs<#UcvScH~;()a8>2H35c8` 
zyT}+|M$13yq4t!>G;K@l!b>rm`im0W4{>_AuDPjP%35C7M{0p)W~3C9@_XqWOpD2B zIV-}&W^yb>T%v36K5FNoZp(jn#73gqf8e|*F*Q|aD&1bGR^Vf$@q$`gQ!$2NDF&z+>MOLx(IL2p6Puq{3j7Sb9{ZH;}-+#ztb)jneJD0NgqRy-s7Kj8z9#i?Oy+5q} zXr)sTcDKwdLyz}Qz2(iD0CwOT;XtS@lCrRTIO}v$y=%&WJubR++Z1l?`lsy}y)vHn zKzX?1Ks$7^Ni>x*0rKn`Lw|hYcF>Vt}9p~iRU-a;6TTWRj)QEzAj!P0?uZt;1Bp$0Pc)Daz-lsTt7?LK?g4j$puIJ z*z(EN0grKuNsS+1aRK1sS!}}c;dVHWA~iqT^?!tte_i-Vci*MjYL3C?5y$KL7JO92 zPZcp!muyHtgOJ$|DMBWLc4pnUWfp%tph%~`)i5bjgaBn>GmpQ|7y+Ek#6u%3QXbbF zMo6T2Atr@!$C*l(H6iMYzz98!iN)p|yFMPR)&mIjsPoi;)U)T;#9?9gn#E$?2%xZ& zF)Hj`=;mef0L@B*AP6hU{}YtOkMR^``*L9^UlhjIznEPFu08LzI>ZCO{rULWZ5({X za@eEhRxxHKJw;%nWq@GPvhwLa+JlvWNBt5@-Fy;+3g@Oh0-&(`n}_K~ixv95`UGOd z4-p5f2dw1B#NQUna;IaIK+!;$C@@I#6h0o$z}gZ*L2P7u0kI{Y^HD12I+4zJP z*;C&|IzpX`z4E>wofC=eYl?*FezMjhIpz>?!i8-qvov?+Tuje+9uVPpD8&vCpLBQ>V-8`_ah&|C(D6 zZDltq@|pe#gbJ}Q72wM3{nK5lu%wa`fjyK?t1$Ba@p5O0GqF25g-lCkpX1i(~=P+b6n0Jh*Gn8l_BEpXh|>AwaS2~L;{ufjcV z0pI5Rg<*FH=yX2qk$3EcBtF*1U02|9j)ml{IRBaJ;+RWIg@A0RDM%=1r2&tov+ZeP zo-b_1#V)R{bV&22-wW#fIwKF_Scb#vG(ks@S3vO}3v;Ze$M8lMDOYb8G)Dhe>Ny1x z{w41#9XlfyMEwuZik$t=e#%8d^vWcvWm6~MM3kH1P3h^U{xK|i*VWPtYW;982 z#-N$gs?-P@n3l1TN203AQizNR|$a19syz>nVoWeyDz7}7uX5xw^c^p zVz7#Xvce%*9DA7#$V&FdN>+xkbRQ%(Y~(wS^J7cziFcQ=%n+d2!U9AguQWK{u9mdJ z&A=^wHvJSZ@&a_V@}!!6Jn1j-a;k+vsDVUcL##ey3_!jHb-HN!tM$>2h<3EuB%*y7 z!%Hlgds7&lSGlFI=18}S77#C3Vw_6gOY1Sqcd{={ipQx|E@b;dw*gnX9H)$sikF{w zlN8%{c@oS+7cZ;wM>S2-d2bJd!iE-6WFr_s@gjCVUxow*lc&xyQ3;Z|X!hdL{igI2 z5lhhh1O<&Jbbg0pzDDo#Pnq_&sKV=AjUlA4V4rLw<>h=@1OWpC#s$QM!f~52%@YnL z>y;(8HH6H3U}b_q*a(&Q5#8!1zzsJo`~CpYo?N#i0gD1)6ab6~Ztm!ut2tiK)XAU$ z;xW`3zXuTf>U%)qS%SfuM>(wD_yz#Nyls_R@i0fiEmdu@#f*NtLRF`NbV#0lSM(8! zrV{Kd2}mn;ntsfyj0ps=pxD@i;g=p8^)Lv^`(H_eRNYr5b(@t`rx*}baWMsp)#Scf zuZ~7TTOMAnl;b43hT}&rk&N=Xrtcmf3LvP#9t(JsS04pDpyC8X`jk2cK7yh}hC(7? znEJ2ZC6hPz1-9QR=v`&qD+#qmDi9B24zTZfl=^=v%BN(RJeE~HUuG_8c3DLmq?c)` zEoGDqF@i?0kd6Egq|Yr-uw<3_L<+DkiH2zd_z?vHO z$YH8uPb~7l`i!&iH~>^dV6J6W^JR<&?IspEh+^-D_-a@^`Fb)w{(&4V69Et76?Z4N zjqQK;^ZP*Ntt_q-%%4CEpG$s(*i3c*(W`|X-|G8cy+OP*}MU9OE>-9AEcZ~y=ZtReD9 z1~{3uC)8+|`VswA7fa|piUtCPds|wqMbVAkoBtK|6|S!HN$&`Mn}+@1&~ZJh$z;DS z?hOVe+qTyc1`N-B2&T#T{vI6pzXSsX5|_)8waq9rC@=&Km4~5~mKPYTxk2tb%fL2M zyULgUDSD!R)n@-6S9TTWrV*-vL0;nQXY*od2#ePP21BHCfIM$PQ2#gh|AG$e52!WG z>>dJ!FNMXg&_6F=@TdR&or^?aB0tzIt`lq@#(D`U%M`HljqiHGAwlKIsc9tjFPF~_ zhk`?72&nb9-S&al?7D*ohDHKFodg_QOM`L1FD2IJ#`O&{6I?E${g1kuE6XeS7bZ`^ zM=^<{`JP`_1>Au|y3o#&_ufRImUT?Eu z5Kg0`_Etz8wzL~b2h-;>FT0#bu4Tw>7{8|F?bU42OJK}IaK6-# zXbS{h1|G#wSV+yDkC$}(_r?|zp^vQvN>vo_sCOfeKoz|w26!E6xF4icxjivTceDsPUf35-}{qm^g(QWo=;^AJ{ew%dzZp z0c|noP*P15i4JjP!PVcv(C>&Z{qsPYH6G!SF+qd=Sh-un+xHMqR8foAtZQbmvn*5t zuXhF_Qrcrxtu0YS8pQ+9Al`pZFg6?q7T+VbQ+Vrlb7$QB^~~?#QWHzkckZ14S4r^dCHxsO3#(%@c$iV0b&kEDauxL16!( z*VoM7|KaE*RUh}%l4UpQJ|AvPCBZJs#RL4_oPtMEG2S``HqqPZ_>2aD=o$vhZU;oqQJ94$D zNgCDLKLF_p#Jcq_v+~}zi?LVVCik)g(gm3F?*N5j?I#&X&~EvS+f^y|s`19_*S=ez zemnq;hM-`D1dPU$IU|w=lNueFk?|q%E6*Ee5} z8Q3|y2SKxDi!RZO4Jrg0Lm>j|4L=7>oG(3ru#j!bz1Sk^UPB(D8~%qX^*;mw zj{ja*#kJ9TLt1*v?mi`)DjRj#{T|dnvm^EKBYDr>xtC+fFT3R^K2rnvGn4RRF}AmY z_AT^$On)~TN7;m_5#IjMG>8*(H(YUv!m-9j6Fu`qIna7|JP0)EwiyVf z%Rq0cu!zob4$Yp;oVrvaVE#ra1P1IITQkxcNx1Z0C~zHy8d1&d-Z`HfQx_oOS*&&$ zFaQt+Uj_rX7dn}<%2y(`EZ^D-*pCaiI=;SHuPatfZc>tHc|~8xLfw>&icb5ho85!J z@R#GMpM$!KJXDqUC`9>pXT-E(#rFKC>C-Qsm4L0l>njPb-t5Zq1*v#9<0i;VF=l?ugY`&|?% zUo6njU`%{MfyX+guRQR0+NNwn#pw-e^7YtgR{3>pyEw)$VKU^4^We;HgZNNJhX#ut zalj_XqZ9_j#>EF0pR(A!H)UD^+fC~tDRQagS}k@5Puo+xt! 
zElNUwHWlmOXql!;wgoUCqTpc+fF){Gc%XsMG4hgX_qL?y< zzzk{|ppp!#Kj^%z;qa*O_KO)pCq%M$I2vgCkHMe3y(YxfjS>_0OOJ+wiDLT=_01g^ z_?-|T$Ovs?IR69F3ar>R2L3g6b2!zb=~& zG!8Wl6ItGeTLf*fY$~k@V}y4lUvYEeU7x9^zc1doc6}AEc|Q|XUElNY}Fv7sYZQQtMO_2SqkMfbmPEmp%p z8;^!$YKO~s_$er977l{KkcEX2qNDP6%EUiy4u{dnze^{*Yp;|2RFb142EF0Q1!pAA zAquW+zB;n5Komemh+#q@BZ6vUtt%XDuIdZpM+tWsoY8SHl)BTXgVdXxy@EGc*NOz7 zu)X{Z1rwZBPw|UYjLYR16NUo-!3-d89B#rteQjr!Y50+X3roL-PWHS^XROrKYsPwl z0RjvjAuiKVdR(=XEr!exK&(KbjK%f5)Y>UQ#Qbo(8nyH!oeqbpw1NDdBVXu?mJkcVH{WYiTDIf-v0Q>&Hs{8f4@0-Kl&KHL8*KhYW&lA%x3=&>?Igz zo6+-s|HIL*IG38%j+*WdjL|HCvq_zn{vP+#I#UXTkV%ifmr!g7gYa?mMIpX{P?WR>syz3ft=p`wLHZh-Vds3U`M>2_gY z?eUtI#z*va=Zg7W?T`vk73DhtBEl9`g=njyUd7MALzEz0te0qG%+J|N-sm7u|4#-N z*OD(6(qF=_7Q4YXu(Q@LO23vnW55v53-E`V#Qa~pw}2uL8`g!+Ea4MO$8#~d2gDsk?*{io|VZVc@zGo?(=^PeOS%EOQZwd zA*K}^f!L*kfDt?#BXt9a+ewaWG!)%sRKqY1I0XFgKLF1FLtrD#48+A|Uv6^mb`67v9HwB^)AYa13XY*Izx&I$q2@yA zmNbFCH!%+$KZeO5^@BTT4f6Y+MNfE!6mv10;;nsFu{b!H$Uf&!hTXV3v$-G+_1RI* z&^eFR4OTlU1Vc>$>l&UXoFXRozo%ouF8yo0Q$-gpj|&24*f8J=5*Abk z21~*l*rIz>asqS__2usLZ!j^Z*EyVA--ps;N~iPB8ar7;8HtR`(R40|tL{>-FW^C@ zxDk0*7RDh<#J_=^CjJICb3HlaBm?%1eF1oI4v=~ztn>R7z6GG5oE>Lh=}~Uz#xPdE z5KsoP8x;>1H4&V@c2~SGe#v%l`262m*{(5gQHs*Ed-DI zDZ?BC-o@r5ekaJcvaOK0f8@lBu0aH+X2TY*`e8Ar|3mqn{_6K@+Nwoe zpqM(vLPW*ap+ZCUA@1_M9xkjbl842KcN-AHS(F5Sc>oMstRn>_zQ5f5YvM>6A#(OU z|0?RBxFGH$qT=#`P;sBbJ9r43M_fzExRaX1Tnt@3s1 zPjBCo)6+L*>VyfpyA9;!C(J5Go60Kx#58=u{hm0e5Kp zzRvh+wNisCjMiJ>o~Uk!dyb>2Sth%f8K&Tc0ca} z@!>R9N@UALjVU2kn`8lw;_XMGaHQRG-V$L1hTRMxy+y`^^Jl!d6d)kGWv&NY=iAtj zm|#92zX8;almfZ52aj3KnBw@}BoIbFu$reA#|lNHh@68)y`x7(u}V zs5|P49qu(}|H|9$vwedqmRkdNEwF3UN6X4x2((f6_xVN4^ZrVtkg-!@#h<1D;i z3r)Pvnb3wN;%tVckLCdIbKDagR-%uALaNsP3&fJ|ckGiPj;r8*d%iFXPNpix!7g^P zT`wC`z|HzDQC_u$S-NPsBA{d6h#x25%eLqZu+1^Pyb|g(CVBNLWiO zf!i^by-+Y3P@M%r%?Rka>**vHdN1ryT3JDKUc2)SrcGzZ$AwE&urN6zP;_4S_xP-4 zI_dWLd3}rpji5jaLMeQA{z#|x4){o~)45qo!X~h{WBd7uXVD*`XrkEpKM_?WI^Vq5 z!tcPLAj1`h1JG^@V8HgWr_pv%sdm7GG+_+p6oQ8Z&M!CZm!fKC04Zn<09Hkd)(rVS zaUrOyeUMPF&_qvRn`3iZc3$^|NDB#;mdn3STo1rT6k57P<-Y`D$12obxA@j6>+MiP z<5)tzEBC#f1JIz7LGNz**mzzg-kJO;zWsVzZ9jaPJ>~3^PD9HC%96#!^k&4~*+|7j zVvXM($}LOl<@T!w4B8GHSIW39gNdqEFv+8<{O(Jm^toN+1VjQjF$9MofEPeHhLsFa zilC@C3yPM>)7$V5y>ilSKS7xIy=Dg6qxgj));O62|11IByjQnqRZ+(<9&dcS8C(3t z_LZ;4<_OaS*HB6A_1ke;(cYnW&wcLh?|&b{@prr4{%gCux`09h{Gg!7a4R#0RL6~t zPLsXzN4S%r{)J56NZTLgHT`xL6e)2po1zrs1@$E|E#BEmxqVI8@Shi5t_CHTeZL4R z4jx+r!IGZ3{rPt^=mAy$L;)GW`U-d7<<1Rs$<3OIM{vXOHnxGmN(yE0a9N3-4v_u& ztyOZ}k5(@}J_$X(*p>i8L4inl*1@u+8kgMt^}D#Zo(Nn(Q-sdz;uKH^J|G9hI6U_7 zN({wa*TH2jYw-ry`}!&NpI-gRptVbrmAqU?u_DOWrpJ&RsCjthj-Y--Ctc865&2?C zgYqaH2EpncClrMV2Um-e0o+xD7opkM8Q4%&bY5P$G-&@0w*h_yh(NimD2;0D+!kqU z1WaSn?=}T~@?VSN=VJ?=n&kA4SJyEIvCZN5Y7Bh59eg~5f&L2zC(FIAt#5vZlY843 z0G|Nwm8`wO<6{QFI(`GZmWoANMkmnWVR)7f{yWP5oa!p#ocR*rwQ5_uq2NIWKqpe6 zo#D5O5%7H0jt_nS=WedF7oPR|5JrD{=I?UIC!uJX!PxPAgJAc|*nTf#Vxq0vo5B9j z;&q$b9}xhBJO>~ROfWc*00;pzI$8sQ&Et(FcLb~&pkua^6W&!JP_Xb;!W;4n#U}kd zp3!)$S1*4}1dJE-;0`7m9ov{d=3(Rne~j6&0N95b!3)jx>nz^+$bI=8SM31kEq;t& zHq$9k<-={ygu~-z{_JW6OfTL&76I>CFWt+}e>T$@1lR2%YV2JvD;Mw9DZqcFi2|Ss zTs{dkFzJ=4#A;tLNrz)m@BLV$XPFY@~&zMnu4s)L*f>n zWVF|IHWCacD9I zOcmjN6W8%Ai#c8CabUavuV5(-2u3#WDQH_f|Dy15>oWjn%7s^xHc};Kn*b2;3Cm6Y zhsaMa0m+)uR3k+iK) z>=TdcKYRWL6BXu;R^BZHa7-`?;ZlwW3>_t(y+HrTic)kHLZ~0`IKj4MN|;g+1Tk(k zGJJ!_Xjc8R{uZd&v+Uu1UpscWy zH3Q*l#k=BA2q54Td3W$I;((eCg}J<5yuFHwjYDoN3xl>Pcq~2RSS(t9zGZhE=X)*T|(V^g82%3w<>uX~TYhX-qyNg~vU10VP$A>6*a3p?=CD%vnCL98gDI9D% zf3vRCI*Qc?b5_8~oW6aHS*M>6cZ&wAjG%h-0Hcyzt3OW`Rn~9%}3uQi}<8w 
z^juH+YV|KBm*gcDe#^1^N3c2?9Q~83|3y~22>V-l0bzbZb@Wvw;(3(|Czb@a39AKgh6fhWoO^eMxUO0G%>i=m?|F8e5=_mHXgC(N>f4sZP-{*qKuswIa z37Lt11WqCi4WNBemrsEjNMqlyI{=LUw1QAk%^*LH4-PGQWtz~aeZ^N$prK95JgwX1 zu|udgPW#6cZ3)5MYu}iGBX4`=V=zvq{#6|^OIEmr(n`KoZ@j7Ls==8RJGb&noAARi zdbI_q3)=cv3`u$Etdjc&3M>ErZ?k#wZ~T5hLB`s?kfQ(ezuulunndsN1BGlDWdWjajw!XY zlxj9Zdlexqj~N=#YX=2YhwX0RS88bhKjxzl( zI}zVS?+uh#ORq2Tgx~y{T)W`kpz;n1NhPBc!J;gSIfam4VVBYgAEVnB{GBZ@tM7iZ z35}t=UE_~{!Q?3~;@U}GH5Zet=A9l3=1rh=Mluiv~xEtRy=07ixYMo?^USfg0e zjT#Zb&P0RZ0FQ1;xKAoE6^Wq zTF7xF3ylCKum>O@3WkVg&UUH1c<{PLk!pMnU*xry<8QMwRPgwU55F&iU?vU-Lf{N| zcu?Zyk8p6V*Xg)*AB$$;VEgWBFas|Z&ftzcrG7zS6?JO06vZ)g>jV|&jusKZ}rL*ez)c)YUK}vcwOeV z{5qmxz?dd0yWale!-nsjLbi^?hzBR}M{W)Z|A=-!uJyK7`5WIo3|bOX5FkGT&TE6F znB(sHxUYNtju{6M@l3D)#l__JH3N|)n#<37>9_4Fs}!eIQ5LoYt?&g2}rfd>Gm`#BQxO#pm6FYfb2~l zjH~!I4urjWRKO90Nr88Hu`wr>27@V9+7RMb?5g}oo(>6n{N(&i?|b5$P#n^Ox3sJg z+x?UW5`ZoTOOgPZPXB*T#F4rk_BYB}8XfB|;)5B8Gj|>ExWCnwi2KgP^o&8-^u_aM zUyJoxt{COp!3-pO@gf6C*~bElS-(gV@dv-011&Zy-)U#Ft@7GO(cuZ^xW_(r5P7AO zA@PTvZ!65{NFc0YFeOvqI1;j^M*)l!l-Lb_GfmJKvNO!%sIVj!>ku%C1I#`8_(yKY zH&9ha+=%Fia^|xm5vOMLDNbi&`g|=8*X?&GXn~gPv*}mkdia}K78Yz3rJw)Pm?QYF zcR^oEzk<5{x~sDjFkQVhks9s|>60;i|N7L-#{GN+UaQ8q^t($QSNrY$C3HG_%_rc6 z8<>fR<9tsoScwcsnAwJkEe*i$p}X4vhq?~A>JJJD3@GoQ^?{A$cgU!&%?JySxH}SD zTv?Wx=pW5-&Orw;9k7tC`UjWsTrD-jzCZCOaQhv`!6Fv@`C&`?(J!~QfDrI(K}N|` zJ0seoG)e#0Q5MiJ7^`S=-`}sisITUIbgl^==k97eNWfpR-`oAX5`=?V)e*j5zPc1r4F!oN60mjB zLHf)vm4itR+)@vLQBqmfFb5~XQ1A*7ejGl@rWe@J_A3xa;qzwxaX~ULtf)U?gqmQ3 z#eZXV`(j#ic^8AJPTqzd_kS;@#Rpp`yvvl^o$m_+AV}TzaTh&*7ZEkU@=E?it^@$^ zMt2?n1@im`03_d}KbW8Y_xihrHEo~6kO;X{`4iLs?~S7WNIIQ`<#J! z4Gy02)+z;2;t;sFXo9dGJ7&=v*iOFRLc+N5WN~U^1azA|d^zLm=wR!~RUr9;CK_Zd zQ2|h)gDRtge&GmeWWSGJ(Ybkir!h!CMG1|w=S$0PXxPFMNI;q#c2dRf(iqe4@FRFz zUw@EO2Y+rM!7u+RK~v|5#U4{E$5}{D9-|tiQnLMASv;e>X&=G+suLQ3rY%TJO zDi-W1!_>`h>Vrg!i0NxT-}9BlXR79O1;P(!c`>>eT36zvNKYNcD-3lL!g#LZGtWB* z!>Ps`eXPc~k@rye)LQkMPVL>jl^Kmij5#m3aiQtox|KU-k zTI&IvFt1+$0a0a=SL#CbjS3_@I3z4=N@5#pj#ju>NNVy_6|9i$7SzQpEzDX8hA>w3 zm@pQNl(PoYA$Pww%U6PkBnt5e#)b6ZWlsc*gTSD}?6aH|P1?*{bxA|{YH213bihr7 z!4;DoaWJ#^;Xo71V3qK_b4Ic9!Ej*JO?WaGFU5ItgFvCF`FqHOGO#Wb-T}MQI!*tp z{42qNIdcC=HcozBH;7=AObP*`4hWEtVk_Pko2~o93X>0|VgQ_ou2aQ>>}&VSh{s}_ zyQCy8%SS;(cRcK6k9|%3{#M_9Z~=*a5G;>i>@%8r7v+UZHPJ| zDq%u;b*?i{_TBxJ-HpP-5d?+9fiWe;(@EWmY($v8P6~n$4l?&puz+$TSv%ZAj?Df* zJKOnfup=|Upy9+aGohk9af+YQ@d80kf6R<&;lQ~H%yOaLDMzN?3iO0!;}`4eF4zun zp-{1*n1GE0M1)S{d7{mI;_K@Od*{_{TCrsbkf}Z8s~+(NB%}qvggtD(wIc?Ie@Exw z7|$=yU+>`t22`1`sm9;mae1vjZ{|}H{99o9{JzwmFa}Kj6TL$1iV-TdOv`KEgH4n2 zxrX!J{vC?J6DD`v{26{pGyUuzwjwgo0rYVEOr7%n39x`ZAVg=M13Tr#k+TJ{&9I2u ztLs*|U1BLkp#fx3)RO63dpsXM^|+hHITNW#2p9)g1?r1n)i*|VC(Gs-w9p>Wl)aoU zKy?XA`G|?gir~7K z;@z-$v^BU@@%#Ei`{jRQ*vNEx1&I2fBLI`=KB#5GVQOwuCU@3d_hG?dh(K9^`T#=p zI97XDMKIjXEZ&|KcmT|pcaMTAN~ zlxF@?330GTn)2okpQ7RJyn$^4;E{;(k;+FaRt$~`m=T1Cjg5gE1h9A~2tiP1!0u+< z1m72dFXV5TSP$r-4MG8bTFfLa%&byy0kmQp%tc=2S7{WOEG@cx)NR~TUDXj_<5QN- zBb7+8>z338D@`5E1x%om097Lvofh~av1dmB(Ob1){#&AiiWCH+A|a8$#3)WAtYANP zQ|et?Eb7^af&oxyJD?=VIv1adVjn!XbC^+4T$4|cSPZ};0raDtEBC$StaLm$DzWfS zq?5nw!ufNJRZxOZyEPZC!H#9}|7|O22QLQ3fZu2>Mm%?jUfOTrJsd z+*Vl>uZQ5}5IP6YOb$pM?KCb+tV5Gxiil!EN>{yWsO59R-9SOFin<2%qlg`;WJG}Ey-{oW$yt`pB@xc>yEKs8ARzS!_NOH?B{MnU136>94pTjy~_dC*bvn*Hz{4Sh7J&1i#~MK1ip277K~MPtKGrLGz=SUAH#q#u?#1{*u2mnK?`?skqS2~ zFqrpL_XaK&X-I;F*lCqF%82LC?=+_>^rGHYMdYWs8;>y9W zdk3SK;v)b0YQ+_W1yDTSv~n!hZi8mK=Q%Kd0we>*Dt&Y7?_9t{^GrL;z1C#IKWY&b zGpt=ng`4;1br+)WB>owmQ^?oa5$XWsW?n zXzwlCg2}0mG;qT>kQZ~6o`;5xpBzSB*sSc&q0J9I*|k0T@7>@RrZf*{K$t*=gNx$5 zuYRp`_EJl62!cp!%DzGaLFYn*1T^W=+H1i}6W#LOfh;XA1KsZd01m+w_eXPo`jadv 
z;Ls73Mzo&*SH6%R#lp&c?k$CWhg-t+#-K=x0UuI?-oA()ao8eWg~rFz8-Dm9gmAY1_bfBm0$_N}M^ zO+F_-^$)0G@^{Nh908CEkZ=Y_dLW4mCDaLx_i*RqP~gMrJDHV(uQaW1bV0Vu6(FVe z#p0Oj$iN#2$l99u3wx&Im5;e z6e`m>R%IAr=_L^{mb1rk{B;srwK2;tI~&26VP=AZwj09)t2k&-0)ud+xeKKlhK z4~wY34Q2Z$Zj|D~sBH(1Am|9tP+h4!C-w3|$Yo094#p#pcE*$U=nGj1>G)h@mi*32OTkwAE#ZrdkgF!*1q+2vauxI*SZW!E%`pEFBeJx`RvM4-BouoVF8k8560G^QGAd95#FqDL^kc~=+vt#X2+r5`)jQe2 z9|f;SZ}rH>H1fcS|N86iz9R^DWFFu0^zJW6Xn9{l4JZF7Yuq+T!6QD4|dU8oGnyB$AiBw&~y}8}L4S&>zIT%?cZ< z!gp@4jyEcJ zuv>({hy;O}h$2gmvN>4~jhepxU^}6S??v#wRlwK+C=Ea$j$K9z02Od*nx<$u;D_LtGEc-B$n_-(;IyO#n(rHGdd&o(6*`X7s~5k zi34^~Bq1Q1h|#PNwjk%p>3=l&R?rg%6D`UlVhoDewU)t5k_^&#A~Gfyfkeo;p!rsO z95dww*M#=@lS%PD8U{r(UOre_C&=%cq@HM?$v1Gf%!^IhiW$YUEJ3rD;C6* z@09stvmeBjYgh-r2}PS**&J2d@g!fG06m}&SO)k5 z-T}%8Ial5e!H8;SjgbM@^UVj}$Ac0mgd~snE-R(z6U5k}gcyL0D=(Yglh2ZgTkk`l z522l)ykvOY0rn4nEV@|>Uq*gFz#F=?&oY2H05Xx=<9jba!VDnB3UZ1JkZ-^*$UK!y zB61oR^-wZ^5)1-LAL++{l#wzz!Dkze-k^36fBUx^x}F4~H@28_6|0@x8W7T*_q zS@8vfq~+{a$nioop-7`dGhrd%?4|=+iF>=f-iuuX1^ytN$V7x`)AV(hQvyh!L58{eQg#C&s(XYJQW_n}!eVzJGrp3s zZ|DB`dvqh`|M&iGY!k_K@a$B0WM4Kl+>tpWUwJ1K9QR(ojTjbK)!*7(e}u&X=aOCJ z2S1-j=K2KAJS4mJ9R>%qAcW7jnI8m0+0yHphsy=V73<#w!wX8E4Mg?R=W;#7)&}W&2Eo`2er{q+9x`6l zBEb~OzCr?vqQ4%^v^G&f z)D{dD1ArKS;6SgH;~zbJgf!TZsO2^Lq=#hL=!Ryc$7+ z8SP}26TlI~-$X#|XW!rMFyUcYbnjxD%c(vrq4Sr3FH(xtYiHpCFPI#~=qCMc^%M3|?oR zc;Mb8O|?8oP*EAhA*fM8g%qvL_i_wR>0QL_TX+t~P<2Afj@LIud$tn@mONu#6;!tG z%nF36E(XkYbv26J&;Z9353w1lL-&1RhTpf!-WUgDNh27c5yK8hm+}=^SV-(ausUS# zn4eA)$u2*SLNk@GWA44$7Q64#7}iX+y=$t`${i^c&4sHY1s!_(E8#Y6Al(9DcYE94 zrthRK?7LdtzTqQPS{gBgg?hXJ36R;*AD5F|2M3lsl|!TZ4xT~|8TZS0VV%F5*g0=2 zD6yi}hT24uY{=?<^?QEbD_GhrjI{w&Af_HUErYAglqloicqXQ7i{R?StN|ioLX_;Z z*uGvM8ah|co3l6wf9?z~Y&xoQeJ>txM}3p=%Y|#+7vtsL@kD^dOyLUekc1~M_HBhx z_E-GhBWku=!!U%`JfFT-#v>2mS{O%u{VT1rVl7~AfP;X9BZU!*L!r^pg0Ei}6MT)* z=Vaf=rEDeNp`&b;O!<2?@39ZHY z;nwZfJO%@TEFVW`UM0?_z<~YchK3${NF&Xc;5ems5mAj&Dxfp59>i3egTYG2Jz;zq?0NfDuN=n z(5 zqPiVESAOiiMkMw>p}t38xmUL)h7f;`5xewg`f^ocm4^G#Nv|y@iNaYBJ^_vi!lO6O zt^+XQLuYv>R`Cs=wEY5^R0D_H=Pff#UQI(;>RSo8N!a zZ*nm}g3I*$K269aAU=WQB2rSe)@cz89qGb*#rpuOPpG+ai%=q10>(h2TrZ*PLri0gah z)H?96SUg1*Xgab%6Z`*GtX`19m78kpj>f9EQtRn@5?gExua5})pX#$fd2J6M+JN9K z6Qx>?@5Hp=b61!2^LPL!!(sYh;9R06d3-$Zj1s~h#c&5Tx@>f(4s(%|XQk6#?xA^O zdePmpJTGACvCOA)^`UrmF?kvK4SbL#COm0F53<(p@=myr-z;d_{-1&kZ{Ms;AIr2S zLG2d6Oy2gxNv$dUa}QDu7M2==g=xTWl!cJy2LSylPydUms@I595{giE^4E||-~=fE zm&!DngNb-Z#c+c9i)Bbb-CYO;r{c_<+!M9(y3AgJfFt;)m0JWS`BdPC2nh1awWoM} zQi>vO7s$I~BNB5#0Tbl*e~GUmnHUuw$|&!#N5oBtJS0c`hquaHew_Vf7t`u5D&>(M z?iv64&HrZcV2Xj2B!kv(|I)!Hfd~Yk=UX!`h^%4aL{QfA1no2F{fP~=FQ}~+@R(0L zOpvIcHT{3$cLp><`hPrvc=*DV(=7{e$k9ZU7%+*=&QbFmoP0<$&BEf-^upKk#4!qJrXhQFL9rY8NkIz@8|`K>}W6LHwhiB9^eI(a$#x8dSmq z(L?<{SICF;*)}LAT_60bg84-{>`0_A1(O3_gbxU0Mp{d*85%fgAaVhK3m*rvPW~I~ zTaIP=mlVNFCL0pu-9r}WZ398!=2rJM-DR#dX}A|o_WHRJT3lWcO|ks0S8L?$?x zPMycZ$7->*nt%M!=S$cIIQu?qx!VHPY>fr)AhhX-WAAX6o_^4$uBkg?7y8M;+o z1b1bxWPc@z_AcQe>~kgWw9IF|q|?I%4G<`@Bw15^imk8Zy13wrMXZBsIqFE&GrNKCJ}HWc?;`dekd_Pf!&~jOs6U=c#?f| z0(vZwK2Xq1s9p7-oP)%`W61K1D%V}!%UI~BL@ThlG~w4gz8Zo+Xi+2ufU*aNs5T3X zt)^rphai8CeA-4d0Y71RXW;8vJC@%SAfTWlghawpJQm_p;iYW`r9NYteRQTCoETp^ znK`MJTo@unf(ePCjn9LM@O%BWc0~=dA`)RD(h?&N;$LEhI=<^O`}TSk-uCymXsv73 zxjGM0{PI4q000VRA^BtlxVX3ajq+rT+-e~HZ|{#){r{@ghnKI-ujYEUvMb;4hUl(cNU4KGcL79V?P*B$$*f>yd{jDJt@IXzSR=3>7DgThL^ z(%uMyY-W}vk97Bwn&_F_QWQqZwvxEiG| zi`c#oRO}lEfPh8k&0tQ80Nqa#5^_tN%(AS+OV z*MxVpJZXt&dd>fj*jKb-SPlZr{2U3}()er|fe=tTfk}dm=Z`ce#VeS?y?Y&v7Ac8C zRo_jw%XImVF`VMOtSo(s9rxo__PvlsjV~y-0DtH7p)v{x z|4+~9N4wVZj~NShecAr9-eH2nMq-APKv8X5mPeBI5)%?q_sjQeFb>Eqa5pMmk2EAF 
z2)49npy!|(7IaTOWnm0qS^xt(C=E;UT>pvzi@I}s-N}>;X4Y~rx1qEJ7G5%w@h3Y5 zwh#MbvBJ<+RLk2YU(&!X_9seLrV{_e@{x=r4`W)_ zROMV*567_Zp@a)2(`G3C!@YL@c!w=6(Mx|PYNEw5IljFTm!ga8Wc}4ydd4h50oM_) z!3ujl7&g}OwBuMNVZeb+mH?=Nqe;g;hW0D>r4aFS1b@C37GVgQ`Gt#@aC_lVQ49-o zCWeHaewI|Q1^sM-fEXeYz!)(Gqb2?cQcs~zI2KVJ*5ne>UYaJpkbAy=x8++n7uXl% zDD#gYMhMLiIFeq44{s8Fk9f=C9!W(X{RmPwob09sGkJ|dFp@36e1C#xll%SoFVFa{ z?;4;KE8p0D3rB?&c)Qnn{^3K6b$xLMv@S2NF-O<$t!4W;LHa2|9d-K9vVw4cL*D&Y zVKFWiBaW3W?z%xuYwiCO(Af2%{CcB>_-S2;9L#2d&4fnL|^Jl!=S(-pbOwFzoR9BY3>4{~yI16zj z2X<-prv1MSf*~lV{gBK%%aa^udf*w52=Gv%>b?8BcJ1&-Uf6}jD0~XFuv!utBDJk} zQYyc^tC$srH>AiQ9z|nS{3v|&Y{pEau)3$=n>_;U;xv$OHk!^$qP`G+;z$A)?Emm* zj{`??W?}#c-^^eMLMC$dStlEUQG?Q<>+o?bUuR!;E1+}Ji7ygdPCOx|_O`-5348CE zjB4WL{#!(YRk}aR>+PA}B5w|aR_6Ej_zomT%w{b>xHu{bCA(eL6csvSoUmRi3!13l z@ZkN7fRX~CwXO}qV)ws$aArG;zPyX#%|UzK22M{9idXnxR}Y4Y+tooAE!gOYC3=s9 zMqAds+LvgdPy|p!8iqoFL=*}ZY#NV}%on9Vu#Pu6b=MEOyZa;r_@2u=MNXRqu$fu> zsVU{^i9b6C>CGqml#8(wPo!X*3(O(LK2BT_@P=I8x!95%d`wY?hFBG0Q4>( z4nZI{4Y9{+xB5;X7yvt^BIYc_zJbQPMjr}*niyoIxp(~@mcr~N90dn|sweqYt39F4 zrTBB3?N1Pc8KUpzz+^0Y70kw;*uDJr^lgYnAg)>#h~fCEtU|So3FUji{1eQ_Fw7t! zX7F_|M1Tq=a8Zftc!4UHX%M3yCGb-V1ynB{SUkLNOMmcK|~NigNLl=`QYQ(UYma%?q3?wj#P&&qmAi>Q=P6>2voeNGL^a68E>ex zC{+2cqXB40KqN1&U$7KFEW-do1Y2&~@MeqK;hy1h$Y5$~{lZTE{bv1I!NmTua@$y; z3`dM90znholl3^!g-#X*vl6$CkDRiJFvL8Z2Ny1xo{Y52n5KU7$eI5Mrhl{22#YBn z&^a@e`(CU%R!(36ORd1~ls1_AYLoqbe7>7)$RMpV*L35J1C5cA58<+=#Pn0!&n!ZA<^ ziBdUl`t!m&Np><`>cnlZfoF;vr`;qU2W+D#%*UgW6RnxAEgKmdG zTod8J$$(K;6YucO??p@Z*b0cyh*#fw6(aL?#A@GSBF6^!y#$x-rde&`jvDVlSMA`n zV)4C1e!8!rujh>xTJo}z9VA5=iDD8UXkIItLjdzBFo3R{upB)abV`!&Bf~! z<=(?49DtNK5`j2eTq7AcP}s{E0nEj?Y9UKmE4GITJCI3C2tf>^1_WKsvH~)C9hk`_ zG#(P_wn+UZsNg!a$vUbSCY|roVo$N;x_``D->u}|H?y@I1Tq_mI1VrRJLUX*Cf$v4lW`J>h=KUzcBy zIOrf=NlWZ|AJj=t(8RxZUsYFjU1np#eU~FWzH6oOUq{I&t>PLY<5(v>vaY(PY9m5r zmuLey9&0=Uf{&2;>B=uxH?Dy>y8Jr9J`umm|Qr}v7HYhjIW5QWh0CqYSQW0mrXSO zEcf6&H_zz`QCwGl#J#!vtat0ZF+@mr-n_mI7$gPhffxnJPlSKxNrNyw3G}FDhb>^n zjx7%rgmC&aZ;YV>6k>!r{ZiiHAlQa+0b%15E1m%evwn-n2TS}SR{ z@8pRm5)>5P@79$F6U_)SCUTiGviR5nKL_I=@(JL2uCHp>vk-oOOt08#8h|HoO60J9 z**$`o(4K}tAWIR4lc|=dz?7hm219|OVQ^HbtyRux(84qHKFu2^hCQb3z`y_|;GL+z zl*zByI4k9S`{~;02{U%>+xste`0M}x3K=2#Xa+c#{Y3v$`~KzJPwaks{r{J)uqc8b zn;xJCBy)7Pc-1m~7~xAOZQCw}MG;F*w}tPvHrw6Ql&;6)AxxfMsZA<*o0qG<4hS(YAMTQ- zSQsTgg2PK-QsNh2+9qz)l**UaV*M^;8Msi4xBs@@+i@EQPJ+QLKQSJ55SJsr>bCp# z)%SQpxC5Hqt-53ppfGB80aQ=(ePv|oxVb-0Va{~_Mu6p1u=G;>8AHQ9cU$3 zg?G&;4?07!JOJayK#&v-IBA8I@dFV1Baps{7%cc7kBaFq;Y64rm|61`X68cy$n#;HWD?mDo3E&5|0%))g zc|?cp|8m5^Qj{!AA40iY_5j}lr~dg<+81;XvyKW78$(2o14Iv>O%~5;G~a z@Eqh?J$OBLCZO?8y13;!+=Siyq|p28>-`B-TC5V-{fABqMS2wJKw8@DVFa1MS?PBK zfp!-FT0j!<2;nfGjM2oV;Z)drimQ@hjQ!hW3ITa=Jbjo?4c4rr#H$&n!HrMw>mYC1 z+U)*H<$ncI!;nA04exD_i3tz)-tOYLr0>!Xnr&RTTLvdj*;u-b-JkON0=tA|j|c@{ zeDq8nhbO56Ai;#jd5@KRbPvXySS-Kmui6K|q)?Z|d|YPzUcN6|gApO|)|#r;tKm5+ zE~4_?_6~!?;DC5QG#5bU=XemA-yKt2s*ZDmN|QbRf6CZz@2|MN-cPEdcoL_>K}{FH za7wDAA4ZvjnvxAaJV`$X0_-Zsh~TLQ0ba#2{toPTC6t|>34QyRO}{Kpe*FPs6@mfK zGgfB6)T=-pKsh4=A*s1rc*|p+2l74relqPRe*Ln5Gaw$Irs-$!A0qCbr0gm&+ky0P zw?lt--+}?9=lnPn_|4tw>h9vPTvw>TJpiQ%dcW~;bye4BdLTwoGz*LeMzp0@8o_H# zto@Wz^F?j0bpp>QAXThBW_?4U*_$#aMU>*fVQi`o-eL8lmqEE2ok18t^3Is{N zHJb2P3d_K88Yg&q`{o~Oa1dsS_n{TI;Hwa5A}n!UVEH%+$FYHH!^KyK52=HFE#laC zU8`K|KW#hp`i}CumRIM-YgidVUU%!N4rk&i!>yvWX{Qwdz`v<*hP^K#YMn@Lo}|iSSW5 zVGW;3r9-&+2kFoV)R~-yWa44~8AFuNZ5HR=g7@h*onHpn7g6y34-*9mo zJumGAj2pZ8Z7V3i;5IP~wmc(;*0KmF@=61*1d|AcIB`Dg!fYQiVDPqL>v$r z0SG#${#7v95sVoLWd7{} zt@&#e3bADJ5DwX%2+on>5ry}u4Hs4W&G3(zqeb{Qm$!iAjMl*tZF=l-TQLakzhQ#8&IV7nM_dea&MI(y4) 
z+dO}AJXh|yGAjxyE4h#T_k|^c{G|{MIb$vg^PKBhYXFY}02!bw8yyD11T<)Vks&a~ zq;XyXmrC5dv50#8z5s`SVFK;l?`X-uB>-~7C_<|FiK*EvH?}V zE1(q+LK6rP;=U-w98Zcjb#gQ`^j4~h?wH)6L4=Aq@KK60`di#r6`czFw-TNq!CCHK z*OmPQ79sH-Bj6v<7KY!K<&;Y)ep#vJNZO(C$busB5(k!CNzHu`GOwHPh^SYI6N=hu z{Y8pJ1>#_U5Iz5NBDM6=g#}{y!w4bPDU*YNd@TFT_qg!rC0@S(iH3ny&x^gl;kJ&Y zGvn|m0W=_(@g&|@@4C%OdjEn4-YHH+3MGz!9@qjz04cBzk%k7R)<+#&=Ys>_mmL~= zUcmX5CsbiiIT`yq!o;}-+F{tZJjSS-rB9uzuFwYvo7_f);}!E~e9WtMQB*V-U4#U1 zDjEC-!Q@-9TNth@>2_d)cf71#$F9!Tw$$CrmT1H-D;0stFAr3BcG|d<-}wFrJ5LH| z|ND8&oXjZTI-f27rkI7O_x!=ARhUDdLx^Q7_+{DBKG62R51V_sh@qEAR_m7ck2J5iP7eN`27WV(;|v!*gBkL-HD(SGhy_2Ov{|fpzt)(R~B{!`1Omi^5ic zTF@7OfC(5QukV_^_1YqO094=!en3=0h$69c+`JbhL`>@_ltN@6n$f)pia3Rf(C_*j zO}yr|zCDlMchS-NK@#!TC4Se$l_a(K#L4*gOE#b^2l+_6HR^bb*VtS?7ak!=5t9#a$E*UUkp>0z!I z1VI2mWE>cVp1va+_2)o$$I|a>zxEd1Vx4l?-jkzEr~&{2VR_H2F-06rNLP;xR4plloSg?4F>K-q853^)tFQ}nx(dsYr3ixI(4Td?4;%6Z9rKClPE0E9pZ0s@c}6e_|W zd`nn`W zAU+EX#+QJjeH~Gl++Hfe!*Lr9R!KmZVe z)+^v~VS78~IEa!W2xw1e4kQZXvoJW2E0$Qr&U{DS28><~GxiTbflq^yCEWoG9s~lR zF8lKf>J$$>1gh%eljZRNguo^9nrtZcyixpITy`)jT|sa>2}V!D@LUqCvBVrKB_K11 zD`WNtc5l%a{q_&#V4ps3eP#TF%@!kL;>P|ED}Bonux~7G*1XGtQ~11CU(Q1Bp}`^H z5gqin000iKA^mg(x#2!<|93KmN5KF96>v5I!6z&N_+BUH5lNels`H3Gb%CKNrJd$ZvO56&d?AZeJzn-`$@~K(R&&>+^rQpnC+220-Z)nDF*1pDGX<`FiE^gCL2> z20|ykhA2yQ5260zO?ZwZgv@v3`E7stsRaH5^qx7>`KSW%4`yu&ZaZM*X={Gf#Wl^# z4&^mU>(!hum z;Hb*^q3P(0Mjg}fPXF(o3oE;8tL>Zr82HGq+KZ}-_@i;Gw1tzcSI^(rJ_`imcsjE^ zK+MU*^{>U{jvn{t{IqLepY$O}MjYM$raKyQI!{CD3pmot@~bw~qY0e1{6BuY-27EX z0FUznjMi3Y+C=ab_y-xPf9$utXT#2`|`#1Z29Mw60g!q%Vx1YT`G-|s)u_#d6&|v%9`;W zY=8d*-WmKA`3_nium~XqUz;EB*~k@*dV>~HXALsI(sVp?fB&GFHECN#`I82M@q9>! z(}BR&SDfU;M-k*uTDgCG*@o^u?k#k^C#4mn_huN-fAn8V&hPeN)~estE?=7h-K8tk zUn+0}H~@;n#vEQhCma>W?-2QTga8t{Y5dXkyuOByl@v7}p(52l|>m~T}y z^%|;|hu=&^>2;SJ8rDDO+H06y0q|E9Sgf@w{lVLVzLS+KIIg`PpyVtW1uV8CaPsRW zd2u9uWL#f(uIfRK@8Y*Eb(spy89Rf8DJMu*+a1GkzNkC;rBdyHlLC_d+cAWgj4=FQd&v3pz37G`wK)e-v6UL8p5CdZN0Ym zfKk{#9cb*||L{T9w0c7N27%}r2V>;{eVhONg?JzAymQ>th*R^>VOp(perO-Z`4fMC zW)E3tnWxRfI*PzH7r$oEIEv*R&A>GUMAt4{Yu(YrfN@_nbs^*URX^)CU;jX*yY<+` zVQ?Q!Q%cQ70ue!MSK9k9`>q2F&?c{cFM}{x3W^{H;aDc(cx+#0XBZBrMlX7QdiT7_ z&;fvl1J=1-nJhDxvp0jY8kaM(6hqA?tcSnn-9j(x(&?5#M7Q$#?1b!ld_1V$r4uzd!>@S?g&X(j*7 z|MqGOWuze)DToUisY6VrArTxR3lKJ;|CwNWKz)gomf-KpKi%tH-IzQu6uea>ySDDJ z9BzK`-HU7v`o3-)65Sqc;vNbHOTSx-Q!lsf06^*QkCg(R6q5N7`$>=0exKSJUgqZ;)avU=Z( z&2471zPcbN?(y6*-It*eGfu|B9d(Bu{DdMB4BT@M^buK%%20}7`g!V&)Mosd_`2XO zMFPja8XOV3ySozQ$-F=FVlS8P?#$ATe1}zGc=~5AgE9#O=>)wh!LVQ@udXW&sHevk z;3CJtJ=>eeY8K4C#nfG!HWFbhVWXDx@Jb8cApyjAPYmoR0d9aPlyIXyImi0*ad?8n z2vCu}z?6qf(dw$IwNV;x%cb9CK>=}Hu0JxWI|LR6+VwX0yIsq^p}LlzZN6X-Ku`VE zxaq}+Wn8%ZfdhiWq&M6dDuAj+GFjM%aut3%S0w2dm-h?hyz&}z306&Qk0a$>N z2zWn71Lo9r&8=f1`;Il}GZCy9p?#Y@OK~po3)}82Lc=U^^sQ`R&-Lp120qpSkI1;p zzeOm`f_KckH*BsAzJ-{GV2uyol!dIppg-C$vIq(7@Vby&lE=Rd!6jA2Xo}cR4GO1H4qGZ z!uWTr`W+>Y(#sFuI?A|!ehNpvF>$MDhWUcR?w=}}dKhR84KcIMo&PPaF()5yg5QRaod_SRd^7T*Q{<9fz~d@@q3`i6F~?n1ZC^ngOES)CV@jS5PgI`KuJ%&UdNVDCIBu`8wi_bQ4c~a zbk|4i1W$Q?YW_oQBG~S--o#)YPzgzS4k*F1rOlufU>LAM3q}yO_@mIq1r8l4WVTqz z%N_&SwSv2(g`p!shW6Ncb@K055`$oz#=zXh!MuZ>5YglU^<<=o^P1{=NF_cZ)0;v}M7(a9PLhyC-+X=c1 z>0-AMR{wVol9}V_k8ct?KoEVosR%rO{jZe5OhewOxf2@zbjgBz?&0M zq9I{qZiA>Tj{Rd-8UFX{pVh{r_!nu@{cY99TNpj*LPK~k8>x#6<4KFp)(Mj0qn zRmQYTo9=TG%{vr((BsSFH-6adxKtz>Glxue>lQeJuABrnX? 
z5C{c90tB2pw=$q$GWRl>+9I6!=&%dADAzVNfVPvujUZ|dFN3#VFj4RW9aoPoY@NX% zzy}3a%OqUTd5SD+WGfs3gbo83Y3xuRPvz{B3OL?Tf}&~`bn@{vCpBcXMJu>~~L`>GoY5sCff(2j58fZ7B8%oXs)u%v(omOu^xUx_8pCaD0xsLN(67N z_80nP<$4WG%gv1<_7>rIb%Ak<&G8_37$5@}V6a@39smLXTEGjYviIQofEG3h2MPlZ znPC7tyaY<746wVjK-U@#E`_7V@dhn3V?bEy96g!4biaj55gZ?USMu&5c5{^!-R>|X z3kDu3#W;>>udZH&1rU-l2ODY63%ycvB`i4()~xi8!u(2#I{ zOBwwdb7r*<0T3#Q@pwMKaxD-ElE5;+4^c&mBqiYD@NuEBnZcpPUJUG-ggb6ts%JPj zB7d_0nl6I>1|(1&L0tF`+TeS1wT@pD6Bh@I+|C35a6nJOV!_tGuH``n2QMu+H4cLi zSQiWoA7IcpJ9rmVXcuWWfB0tB(C^t!+rB4xhnOMw0E!AheUx|KPnonHKcX(}3g3z7 zaA#4)JH&K9D{d>qv0@~a5Fo9@xyZuH98`RQic9j<2pbuph(t5!aGi2T*=F=y>EeU^ z^BmA&1Ub|vg-N%J!UGAP*0;e=y@r)Q001GMCAc5~5C8v%hl&6K1ONa2utUHMi0lj( z7q{phgM=Bp)VZ_Y{CVMC1&m&zu43|h9wHr**bIukUi-P9HGfTe9i78{eDk3->kzWeL z3M2^z50C2;;#htzuK34<)~Q{>fZ^Y}>lK24Q3pvczZ#U(LiOO(uqTld8-X9fDAaG^ zH9e7&pH1e69H?3A_Pk;A{|&aOz#s*$cGN!wweY$O?(@g8e!!4G0|EdVKz-+c!nL0j zz3%((|Nr4$2T)%3zE1krX_+|q_)7p+4NWQi{&&u^-=GZV2#o6m1T}GF-`raF{MK-i zL*%+bt%#r9tj;G7{5`4weMPrUNB7DLC<~DUL&n|u(soFQD9!mbw$;~Nb*lL^fMCSs z$y^piNCyBBS_KR^0TWp&9@LQli!P$rgMbK_a2zB68OI<#&Ui}5K_n|7kBTf&da8vW zr+hfN<(oHv0rLMCz<2;2(h;YdjG@@e;$IlP-b45SMiwpaHUZr!W`7H?d;Nd)H~1ii z{ri5{Dqz`s=Ux zH`t2>{%=?YU@=Ms3`Qs7j`;v17y^|H5h=WF&yke2;bnRtYoPFp2_|IG!sq6p$v0M4B+A5cU+ZESiOx`QX z!T$E#*%Rpl_tO6t57sM*;jsUoXxP?MpQ^c75&{#ET%s_4j<$<@7(bS;A1Svr9OAZ9 z!{>$Y|Ns35_%fFw|NrPcz%wMVm1${Rt*t8h{BXiAfRpUBGqUnC^dH4|4z#qob?XyZm0FvU458`UYV&DrCAqD%thd^+^u5nwyz4IOaHh2;$cu# zQB~;)jb|k|Oae89X1_^h+wps2{z9k01j3;rrZOMd82?;$*7S}Z-g}C&=sd9y<9OiG zCm;12f}pBDMcYE+Ped6_eGx)tim-)3yiAnT&4MuFEKC*Q9DjDj7d)7l5Q5LK8*x4s zD^fmq_io@~#nv*gInh|JW=RR*{D}WXd65c^3&vcJt+lxlD^FEVIn7rzN&+f{4R5PW zPNtf7_V3!Qv-MLIMOgR1CB{tw|KkJ7z#+$M4$q@WDQG%|Qs;SDw0(d~F{Y2`i`VU& z{zuR3KLu6a|4#@6XE*Q>(dqe>M1@jJObsOG{|65d92W@Hax`V0j*go6OJb{pc1qg%^{mME z*Zu+jVJr9Bq3dOp(~o5e~LWl)s?4>tpF9R$_fUrZ|@j=0$p53_&{6Q2_bw;wT*?72*lUPm_C zY5pYk;LqQ6=TvI;_^!B#z*F z!@wB{RX@`_Z(F@0y4-nsK!}C55<@BjfO%S3b1g7j044g6dskuyp5i^-UF4vc;L$)E z!_z-!OXG*s$MFul^~!MFCY0&}+e-u=DA&7<5AR|lQ0H=?9JP`Y2M&waI<$l4`Q)NM zvu3LebdK22fo{1sSx?RyY#Yd%K&b7*{lzsPtMkmN zuzQfb_B>=voDx2sY?EJTo;QHRJX4t)C%v0%%k$N|C_7nwH^+B=vDOL?aflqS_B>=R zF^B2tYuG;Hn(M~@l|NGC00=B)T?E2Ky8T$=2~C*BB~8~~|NjxL&I6z@3InC7pmYyk z|M~=+)YN*gfB#{B{C-EjedQpu{iv@D|9)wPEOIC;Za+MSvhdie$Nm9;Az_#7*@M8* zWwvaHRDU-zTG`1M5cl>Fhm1fMMo@L8TjLd;a?T79`N%0tl;0m+Veig@ z9A*lz@IA3Gbz`78vzJ;!lu9bk%Brd-lY~%evr#3U)nLFD^sb`H^}Q_m^KzkpP^gF7 zSH8ax5f?>A5xqd?C9c~C`rIzQC)K6OS(Ru1{cR+vZB$=gmbZA;d2*YNYMsNMz`QQvqFr3xl9}IYwYR^V82|eWLoPSP2pY4| z-eyZ3sz3kuSTdy!TlooJz90VyLCPeK@Bb$GKM;Y!{&rY0=LY~0Ik2@>k{ZjH1;ivy z`w}Qz99j~RT}E9d51Dv}9m_^s%8WnOAOT_+8$69hf+;ZWIU@&!umKIuaFE%F;lPxy zhrQ*3zGfv4>F=(f2VNu+PM27&_&GR+&)iCiFfvH88U>Hr*sLllCCcb(V8*C<#PGgS+hp&l!d+*oZ;pz0E9K~Y0iriM>y`_JL(!5UTR_%)aUOS}%8YFPGqV+bv zr#Dpbx~lNPUOP{8p;Jq^uiN9wK0CLlC<2{MlD-0%bB)C+xTM?TOZ0xm8KN1X&_syK zl2VvyLOD?qW|b|vWPANQZh=v4BbHAA;=pTQ$Vd8l%!PBk8I$IiTVD%l!0>vN37IB) zmno$?kL0rF`7=1cgZPnPfsJ9sujLpnb~x_8!Hm)VdKlM4t2LMv(ruj%L%RhXHPK7p z6ep$??VR+-4JnHsc@e+NINPi8nu4vZ7Byd@rnj7*{c?i$g-iS+(R@VRqnfU&E9=r= zB64J&BF2R!#e<|amhd4-h**=q_si9F5vq(Nd{=fmm*R3ZxwWxeLGt&3vTR0;|ArU_ zAGFPbi(mjC5iLRIv(QY|1On15ygU)*H@#lZkN@bL`Oq^dEd*Al7gd+7$!-h##jP%$N`lrSPT!qaAn#0scUko#dnH<-XPUD}Z`Lg~Bn# z3R(a?Ud#bg0g6|rs8&s6E76VjOW%I+E^*fP+y?>sI09Y&Yk<&5&xpx20(T<+a-L2g zUNc&4aSwBxOt)z>{2D$JLLlwdC;MU+W;?abR`Ba$3ZpjF@Ei$2O6N9)Y0J7AnW`xe zU(_h(^sQhun87&HjniZSEjM7Vw;yMZ1XEn_x3#S-syL;<;HoZ&J)lsn7Kh@xHTQ3u zJB&DFN;#T<=~Ka*KsBX4o;^ksv~d1FtN&JU1^jVr_Ar1A(==mozJvZX{uaHvzpmyUk$;6IV+9xDfKP8E>t<}hQNjcLNZsR+ilczt5H zC&w&wxriq9E6#-Hm6S>E6&vv#-xXLLB(W>mkk!m_zebPtY|7SSR5lEX*Io}Gb#m*F 
zbjo0Kx_@F^f8{z~Gt0;(lD~)jXVUqXUUHifpq}65;R+1B<^c0P30_ zhyUR9Q@I#whj3zaf?bygoiV9Z>39siqQ%1TD35k94To^vu#x>?QUe>zkVvLd*tl%5 zGRaYHTfx_Kz##gdSR<e6XE>{+ur{7d_RUM5E@^PSGX9-GW8VDKn->;M1F3Vl=i{=v^cDFPJ++GN;z#iqs0zN}acW39{$heVfrcm>&IkuUvm!U-T4 zDb%waQRB+{Q6Ml)nWfdN4HdRp42Y5_yHyhoT2ykPtI+u7m@ppr+wn{8g6kyz-Z?ps# zriDM5b>d;BQY?=Et;_S|9|3EF|a7D{bVPCy0VYRnv6j zwekM8)S27w+xyc^vq6HBEth;7&~R=*R#?D&(hPfJ|LNQqRO8dOdEkI$DJjDR zXVL%9?2ue*|3Ua35j_`#A;Sy;E*JnaUrtXK3x`||14X+k;9%iNzd!xl1BGxsmw{!? zcg>~$z%W_DupDldhVoYrF>nVp*NcQ^`{u^|)2XQZ8ljg20&Fo-gGiNCG?@nmR3wKE zx{B_IKdZBi(Qf0qA_+L^sL=m_+}=DFk7!!Tl}F9sYMV8$H2A916pwHy|EYPLE!7~8 z{=S(($ZWzc;`zGN+u{c~l{1ouV!NxQnt)2cpDJh?%Au}K%Pg*}; zhc(RLfxj*c3ZUb!@J383o&kKM4go1i!~hdTuk)}CMM*n^Au;Z;B$$MbX!1v83JG9)dBN$h?gx)& z;B_(Bo%VTZ5KF#WMqAZBLeKF4`U#sjDx%cV_Z}^hk88cn>fjd|fgNXsOTA@opEV1e zcJm#hOt#U(zwiIkAQtCyI%*F{r9gc0LSwY7;K-;dmsYxn*}VBoLv7FyrmZGS-dh?t1-A`wkL{~ngQ zUEFFv`eMKmWMJt>r~ga9?N-+FL3`@M?XSbn+U`I4m>dVIi7ub)N31f>m{gZn)AL%> z|Ns7(ht*4KejxWSgvQI^D`at{zLnlO#OaR zWxggfMl_5_naA>I$@MmpiTj*nn^+5X(UHgYTy7e-%(ULxU z6GsLDArm#$@gqopzTpwWCCwxE3<{xt7ElII+sa<42TRowSjckfLx;q|AOc0=tR8wg zw7-rJfpiUkx5&cKE3EuBX-5p1@yNYgx5elhM~6-KS@*%r(s~eAVZUAW0@8rtV^C6k zT8f0b9owM7OW;&58KvC^Mr1&x2waO2qo}aGBE+2Q0l{V*_<|CEXnBhZv%LU;S7%Wk z5GqFA2&cx_djx~AB@u$G3yTYt_PO+}0o5s62TRj1wj7%MtY(|jdbKor^{hH!@S6;K zr~t4v0N{Z4uLMOSu3y9a!%%BhX#QP2DDuLzp#)EvoYYxxXF-B#VT?;po~yYo4M%jY0=?10S|9(gT+5U+;r7 z7+N5T2$n*$+m7u{>3e}rD4-Y~GkUc=J-K?uA5jqdJqlk6aufZ21Dc*?j(rxw!u2x^ z{Nnk5Y6dy!3GILzX<74z%XQ!Sj@g@w#z+IvCIX-*NS<)r=+vbBZ=6rC_wQ;uzrZau ztt{CG1wIY{r79>gvGphcQmOb3c^`x@!Vid4_-j1-kVm4Yw%SQNVZ78K&j144JTf4og4& zRU48dUJChb*5h-eiJ9F|QRd*&ZVo?$Bzk*yAnrku37y+20h?jVgl&5H#_swi3*1S$ z;E#AHTOqrffWg@>Y8k!xl|4*2l-{ZXhrIZBDh|FIFW*j@dgcCT?f|cJi`|WN#bBrN zL-}JmPTmp#1T30U;mP#Pf3QOVx9mwg<1p|n>HO98E~|YQ9z+Y<%zbiT+XPSj?9wDLyElA16h4Cf0ut8W zo>3UH7cvd6sk1Dv-?XW}|LlQwJ4gnMXMzS36QAG_vcEl>Qg%^8Qb~Xr>o$`+7qpCo zD$_lfNt1n#-SPYcQVXCkNR1XLrh#BE0d=WqQnprAuq%PUannwI4EloXih|M6YD!2i zc_Z#7`#5+6!H?$;a7wEG)|ZvTka6?y-4didBxp}2c@jgn>)%6PkJ*A3or>|eL;!>( zzor4W5{LM-F7|66LqFa{{{OeyR46xZ9Is=NkB_liqhGDCb9TNb!E#&(3OXiH$M45u z->i_p5Fij#gb7IlfB_2yTd9Ps2Ogx!)aU>6hYgXIGT;FfEX8uyIMMBTLX0u-G>aVI zhv}o4PShT8fGtxDzpS7M5Hw&oKi37}q?ZJpvr|L@u$I;;XbP9m5o`twNYDS%PXdP` zDRKp}`Fwl7@QtipjDQ=o2*T3o2>{w?Eds<2n1`J3Z~+_7`rf1cslmZuvmjLzTwNHu zg(HR$)z%U17-XOrk{$%_|Lgy?z8-=ablI;ZIK>#XPDk)G>EVDL{sw>_R+wh4&gKl% zzv0@C{L(wdOh&mpj%H4Vx@et}o#)P<^Ojh^7vHOr%T&kzovJvBw(zMk!yFQ+l7=%B zgJoIbh{0dh!VAp;h8jnv0}fcD^?U2Jm8aiGCYmOh8G_mWas*arPj0w0(>lv}F%DR@ zx&Ka`*1VUN^wVCIR2DquE>g{orSMNHvbEv>Y&`=#kx2mqlhS;Z4s-mKo znKA$dy2j}(qDQGl41dRIM_1S}@WKyI*@R?W7Jhl$#ntZc0s9NrbhO1|kqN*Kfa${< zCJ!n#I!G#T$O{7aPuplo0Viy5SILf}g+4|jkI96SK@)x8In3}>sJ%a8Lp3eE0CBY5 zZBE8(z-%e32O(-6X2;LrumIM3Aokt4N^f9hkc@*3Kf%#x=(M$#xFB~5<(5sd`2>^f z6iZzLpQTKg1EO(SZb9JzmadZr-r>c9&trZ32KL|CR=S@&Qq-aD#~!Awwy{ zU;=Kbpw9S+^oNH)44OR)R!(#63V>^^7!zc#%JQffnHsl?oAg`$^~7ty0-&ZJXtfbB zp;+eMNHJ%es0*ROa;Q+uOlY^CtjaHsf7cdnP zDu9HnhE8pKOh^DBrZz*bPqKE+Xn@~Fsdc~%g;+#FJ#YMOG2{7vK&Q-47gh|Zr-!N} z3dc%h%atiIU31GmeMmT-k31Z%4nX#)1fZZJK=ZaNS&}XTxd&Tmo6%|06j!Y))6voT z(v;F!G`nlG(5 zZ*9GMSJ`)V*ZBWTeWZQyZ<D~ z+E+@mFZ_njKy5VsqE)?A00gBTD}vOLKzKxV*d<1c z;l~K&h(R-*Zr8$`!Gg*O4-j|;BKV=={6}Nm|MwU?3y(>F@f{riKHG10m`d72JunMN ziMKBJAOZhua8IxG!;i#)?thb0qqw{AXW&S)Kma9J1a4sH0KjBrkeC3FEgDFDY+y)C zJM1dGa?W7VWW{8V_jM}7Q_-#ssWzLy#A2Jea z`jbLx9aDb+H?KBtant^lL;v$y030#LQ@UbD00t63iWc88!5V3%E7`yqgTR~0Td3n#6Rl0p0W1lx|B7*(SD50d!XpMOyW9`j1?sg_u)2{SO5qEYj)7n z9@k-z7A*oQjfAoYNUTs^|6l$y{O}Ss0n@^lRkib4>_g3gnD}3a&(o)~g!OD_L`(Q< z^6&Q%Q#P9(~Z{z%a{@PA{#_<#!ht+`cFEsCT6*C}~ 
zrWe~WycjiV*U#h89wq|lUk`p8c>g2WR%O!tTrQo20+;>_%mPl76bnO309S$lo{I+o zKrZxIP5@L%(g86Vi&3C_7w*<5=t23H!wQQQ04}iZ8ymTK`R;oF4Z)~vtE&cjFym3m z{33ln_P4?&Ed5Cuk@^p`0H?0h^>bztJQ40_TVPLuVQSaz{%Ch@)|GCkm|M8z#sMA8 z6QDI;hdTA4&OgQuuKfce$s^HGl|BS(<@N83fAZ{s1)lr)`PK-{(5wxQneq{0((IHrFZRvXzb-B%Ql(B<@vzU=L*~L z&IG@u_NNaIDqs>GTYvC%cCOE`gTQtVePA3}BU}TEBx{xqK$+9{2cv0A#6m;1|JQK5 zu0`CSt;;OTLjAZZ@pSg>qEVCM+q^t&v|jfY&n0jAT6*iXwZj4j$I4BN*q&O}FL%4V z1s|`%!&QWKeMtuNW4jXO zfZ{b7K+98AxLZegN)5^OAga5ld7RiILcntCR!Z(&}lSnzf8^-t!Xr4j*N13eQO@)J(a zlwuIPwBL8Y000ZCA>jlCxS;^KxW&;vOE;b4cnKmaZZg=-cK%C*q6TY7chf=8#6{(qr0 zW5-cX7-QeAztuzt^M89nGebC&o^CDwB$hoVA&gc5s>*_~L$ z@c#=jSU$|TzsA26o)#d``bVFLA2FT5&(SBOE*AKD!_^USUal^ymDoiv6vq{)%eLS1 z2dO~|I`0ltluwT65#WViSeU^UbMawRPdXWTHlTaLop@v*4YG{_#K_JH{Rb`z$sEeTJAO1_2FzwdK5M zsR#+BphXfMDdX(MY#KmBMJEuo&!1CZ?Z||c?|W}~pF(?FWZr?~^K-p_Wf-uGB$p6H zh`dMaLApI2#?Hlc^C|D&0j{%SK&~4#v-YrOW?sSk=Iu-1;sU4`05HHkV3=F>nuGm! z#kcmkqUlscs@XQHtVzKg)-K=6R0KfK1-uBV1`sHSfHaT>a0np)&vtnIrfUt8CJUyw``bC8Pm1wd+@E*rL1{&d*QfQg zE+>-;#YAscE`kOd`ma?Ilg3CZh#W1F3a4Lho2QzHm`7yGSz#clM@Su6P#NW$oJVt3 z_3&?1_WdO>N_8LR70~*vU(^QF5QGJnnTGl~kJP5uNiUg#+U3u{Z07&R>=1I0elOC} zN+W8|7hx+Fxk7y`6g;|Z%8>FT9YYNiVvJG67^4SA@l|MXcqx`#*C9E9ur6&yE?vde z-O%v6L4En9e)I*XF=N$r1N6P$z`YMKz>^9AB(69~gN3uzU-R!WxZT7+ATXh_;lS|@ zMG`_B_3^dzx|9QM1u#jQ)Tl;NbqGENQWhV+3s>doU(0Dd+_?LAv39(;w7W*{+GmE_k9;=zE+kF z!D6q200iE;?#$@yuUL-vqG11u4!L51#HrwX)*;(2;6MR707$@DVL%cVG^E+IBAbd=7>Uf^gY@BEfK7eZ6kW-HsS>aENRkj};wE{~lfO5Y!#{|CxkEu`5mNI^hD? z-II0`J0qk8IPWgl@h03e;z$e%<2^ts;bhql&Xfv5A#uAT5CmVKTMmoRFEhzm;&hG6cZ3fL#FC z9#O*BSOL^g&n<1s<4_|6LI@THJQxE2i|^gvi%_rwVI9kM0I;Yy69=FkKstacz+?%) zj1;}wX#4|$*FMv(%LM6k^$Av6VSqw_JOM86#C|D319Rj?;F=S+V}z>TzA3~535Vgs zTe`RD$Be(O zfbU}w$hF3t}y??gGK#$qcp@pRM&^DpDvIMfNlb;0Z4!$@EX$qBG4Lva1!DE zxDkFr{0yk=Jb5m^x_&AUNbC4mdzm4I(U&3OaiPLR+^~%#JpKQQ_TihapCU+w0+Mx@ zehF*c-o|WUJ$ykebsZZruhg_5GQlta2mpI*8<&xYCr%Cv;5d)ZlKT?If&mCBIIG%r z^t}Kg5Evlf5D3KuVuGm~%i%aqSUd{?e^$pYDC^?XDpm{w!$RW>DaGmX^vn5w%6uG4 zF$(!c<=kv#2xu&V5|UjJ!fJq+sVO8J#Ww8enX~x0vtauNi$eI`lpewHOAxxQ=Uc&o z1nKX1haH*#`T$-^7z(xPTfaZ$@KQKfYr`>qg(5E$7w7>G0PY?c59;u+RKFCtd(2Ng zNB6=BHu2lvE4Ttn0K|ttRDtji3(yz_g};KhypVMzmIhz=`o{yi`}7~-nJe&M8_wd3 zmhg8B0GuFjF9(AO>+3J?zF!Au5FS9ZK{z_TF18g3j18IMfO&v${Vk<%d+*|&D_wnN ztZoP(04bzHAORvsfDtrd!DT15ygm;I;#T*jQJZ zLbAn!fX)Go^e<2&_)8&(d7%`9Ent*|pw}No=@n>e88UJapmkxG%`eSlB0?}61`mj^ zV*&YAzLkLYV=<-q33>;?{pipU-{o8g;5`ARC1C(UP*(VEWZUi<92h(*#5}QxeY{C+ z+@d>)$M4Ie7huE{2f&^Y42(g@VL9*tCs5aa?iv11m-FbJ(Ux`~3-}wKx-8&?t#eR^ zgOC_z_nH_7CetHZKg9m$OwVYkL1~EWPBr)k?MxZU&{-hXonY za=ubA0H6s1V}pU@1cM#EzO}|9!~DCu@(4;h!WMFqhpNW;VCwN1W%L*x)C0%?ss!zv zF^k;eb5FWg%>z8<0Zj-CVMTCs(p%Gg@5A7La+yC^|gM? 
z3~zfBM}P0GR@b(Oik`wLk)CE>`V*GLUe0m~2}{L63bIxWck&m~!j|@W0*FSlNLC;U z=?=o&+(;SkXl-Dk~fk;NVHc1BASn-jTv^!5#SO^hGb= z`-A^k{1%4jz9cpux{%0Ib(h=_15@02M-t#39;8Vxi>#F>-)>_|R;!DK-20mhp}OJ=m1QW>J9RKjoqcAANpbFPZ>a01gPSkpkeh zz36w;6OAJf4j{R94kex-81Dkm0vRZpbnG91cLA^@69MhqKjyqBn$~ftL(Bk5YeoGR zA{9?Apeq#<U@49O z%>XchxA2@kJg@*TrQha*xH~TZa0^*%^!NaBTEHF1222)&I;I~omLBPSoBt=?g0(gYG-hIn;6E-`{r7%yTQXCx7OtS zH)k<&BH4}KQh0CCE_m&sFx59?z@opzZ`EIC^;vrdAjld4EWx0NpzJ8GpO+e*s~5$>3?SQ*#`eq0dmll*QFxC~ zYCu#k#~O957g^%_Ui1v3BKt?nmW-t;y?65c!+1E0r2ujKL>D&=n=%q%C{UmK35D29 zE+KIqBV6M}>N3?Lofd+@Ti}>RPB|G7Ksfg@TZy5CcHy46?ruz8-reJ}=JW+cGQVnZ%qVB}LR> zG!+k)XAsV9X!fgB0%q4w6#XOire>)ABf%x`oK-QsQ?!|H_vgxPW$aZ%LHIKX zm>?ry6Hr)&5K)!J z2WC473DYebJr1=B5sP~U!R#4`H8pG=0y2h08$KTRf%7%qN-PCq8%j(&0auK1pBiv@ zuo$lv{ywE4sNiN6F}ZU@86&es0VG058IYbEBn_2fNN6p z|C@2d9y6wQf3=!zM*yOu?RtU!?z+Ek^Y)0ua?|h5X-s2jEr; zqPKN_)%k%iDX*@*vjjpbTACtZ?w1|mqmx?Ub}e1sZDs>NQ8-`;0w;INw(rliEyOXw zOjEBc=w``p>?TS_6ca?#5j79xzFj_rJ`8GW*mJESH&@GJ1HB(q| z6ddvw(L+!R}w@(9ul9oKYcVN8hI4uo` zJzbeczL-Hd^R?)^E>~~`kTR11R|IN;3>YZ}mc`KUo4P*GJfrgIV)Q#yGVj04L15Ow zn>rcay(FkJ5=!DmB?>w7|LiVr@&5|PS#%y;w^CR|9Z`D}bCa{oa$?&kwrs6vc?GQp z+7s%><@_3o29SRVD4+9Y!7;&bH5}6w-o3(ZAa4L;fEnfXDZ$?*tOygJ%rH>dL1$e9 zK*fU=4;BvlJ{2Wvv2fs2A=bXNpao6_06+v#r1;fZuD$f?D+zT*80GI48_`Aq02BFF zgGz)2%GUYgyyEe|?q0Fz@w5LoMmgnawOm`72-54{EA*M2obQx0WJ z>Y|kCDpmU3d9?8k*|ucJz|GdO#&)S{y|y?=D;+W&DXcwVH#?9C>?sTg9J2GuiNBOs>!~2`DWJ1ERHokVt)#&Dj1C)Z7*DmD5_c zds?~&-|$@2s0v^RWGaHS{m?&O2Y>J`fd2*Q-QH#@hA;tqRh!mVZm6X(L$E_|vsLky zT08xX^2P7FyYTTbde9G8k@?i@L^>Sas2|qEO6mt)QEkjGZv%qnsG`s_OoJkW z=qe7y7+|t}Z*W%dt^T+jywLfELKJnP*vTiGJTFkt`xK0$o`b-!cwjB9x(?fN(y%pN zp43^b=sS2eK~cx9Rz|7b?EeNgLA1$kx4>fnqy=RNCAo9N@J~3bmTV#5EtV!crRDN| zqbn`W|77WXq54`c>1RPbKhmnUAo$lv>MeE;VAwjKJrG#EgXJKUgTL6M4Fl?5V8WQu zp{a_PdN3m#TEn{X>yQ8C1W}g!Fi$R#yeejT;&+a!}H4v-S2%E|E#Ur zViJlV9rKDo?T1^b@ofieXj5m`%TR#JhfoQUMV+ohPw$~idq#1=6N7CE%r z&DI>Za*R;0WI^Q^G0@`Qqy)VFD2WGLgxVGy%9RjUqY5CAf{-N&jR}rA_*3jfT-5nj z!7vj99Qwi!Ygkdk&~C7GfB(P+xJ2A#iD)o`8uaF<{zf9WWo6)45W}a41{+U z$VCJ>h?fS=LR~Gn2v*O_S0E+`13<_kt_U!MI9I=dJ+sIGqt4Uc{m|F+%?pV!!>dhm z0RY(l9#>o(aUZ^dQ_&eDUXXG2*ZX{Mb0*qz`0wUgu&``Vk$~b9I>ycj3?XMD^)((m zIIrbof)D9pBmT(+7XE_5Yu@|A9OQ;6ln!H} zJ?{LfodQDR#v^nkVKCP9BGdu1ql4U!X>Vp$_R=l zj}s7143pv>$)g_<%!J}q0q2E@V}yFraUH0%<+tNNXVqrIe+K*Mbye6C7!w2S`u_F( zyxtmPK7lu1YHZ>7ZUkC*t=~@8Em1Esf50#ZA;3PyiAdhWtAkkda(GS@6hWJ?ScxKA z_-Q}i5GY3-Oz8VF8Z^hT+rA)WzE6Yim5pUTm4eQl_fL-73<5wS4jxNcz&2%z!Y{9X z)ASq=8(^g6qAkCFxI!zcs{X1Yl?Z&yMHhd<+@q*^FSC}4Zy|ysc*rO&s3DIh%R{*H zfe6pB+!vhvNxwmc2GA8CrGf&$_Rt#HEZOQ#7F%fOs$Vw4D<-|pH6kvY$pb)ghYb))**@90uct@9C^677j=7bhC8)5@0-^UZ))g3`>j|{7Q>TL%f zwL|$eNj@urj)AkmR+aUw>bdSKPH!WI?YJ>env(Y<*P8i!g&ep+|PeqC?qDIHBZ zYwtN#whqeoRsMrZ5KbX+9wV-9GSG#V7g(S5QGH%N{Zv<0gg?+(LX^EQzJrnL|Eh++ z)k9zCiW>h#QELS`3cyxFnAK zJ@5DYw6JsuWJ;~gem!W#U+;Z9;yML^_ve`U@T{f7X%E^l2EM&h|6Ub^^fwWwfAjDJ z20b^QHK+YN!CHsojPbd_cBu7F|Aho)rjPLBrMw`&d{~?zWoT<)*frR5q%U)PFAnf@ zKcUe7+1_3^f`->@ME*PPGsj%_ChczlZW?ruLbb>Lv5;=YgV0_uR)|%=ucy=G4b5Lg z^8)y3I?g*%|A&t9s-j70`H1`S;IS2R>NOAHqS4UUC3>&_&HQgw0($%Jk$cN|U#z)7q&qTD1M{@(0744M ztzz5ncC=+z+W+tu!|VUcb{lW&@JWYIK?cqP|Ai8h;X`uZDa=4>|H$9S9c!Oy@2@&_ z_FpW#qZhh0`DJv{KYd{#r}xEf?)i8o_WygWCg0tA{9sBjpNgPIusxFoz3K4^r}rd2 zaVCI1Og5=;C;l)=Y5~O6lGA&%8?gv0XT8+Q&ls|}#ARh#XwEa-?bIkPMc2CUxys{b z0ks@&mVn`KS4#5OdFSG1i*IqNI`F04-P?Qj<5WBFR|Rt{iZrUg9`ja*6K?3c!IqUY z{{4fINqhgQ`C1At9i|CX2opxv1|~~Wo{LwFSS1t;N>-cT000XXA>Ke`3kzyQKY$-dhk7EkV#pM{S6Uk z@~@*0V=MhsLI?&bPz6UESC|n?=IkBX;lYMkd|c(3W^vNYy#@JWzwB`!G;M{H`aLm;eG=Z?b*#gR z@_XfyT;EXGL?l}u^22`oN5_vWJ*D!RP$w^gWHRlGq?lYQ&Be6YorU!f2wZ6!s1BsB 
z;)QPTcBRuPwC4z>)=@RCoZ0lsU9G~MM@@n6%`Q~36v<3%j5;YxsZGdLTTy(jtkwH} z<}#B28;k&HLc+tQG34cB;r{$le6h{}-qGV`lnOFpyYUt9y>7z%?qz=;!ggnGsk9|? zuOQv%ZT&=}3!rEis-S2a2EoKGA#n?cT|=V6g7OkL%c$ditKvZ)qeqq_%cl;3C?Uy2 zFwEY#j$6x#3Y;~I>`btvybdEy5rWSOZoR3SxA)sFmIH7CYJ`BC07L*H01$P)I)!Q zS5y>(JFmF}JJM5xV4dXlgOpjZapWI(g?YY0x`& z?ga&`Sz+#>GAj%Wv2#Fd!ay#wJVSrUy z>%HVmzZ^7!4lE`?hGMkJjyO}}-R~>#mEwg83<(>RPcU*oLUOMu21 zbfUC5hyVON54MlQZxOsg?T}0rQ-7w!5B*8&;wg3W4}gyba&)v0fO5)o3Ja>ZQ*qc> zV}jium{@rZqHYo-SmODxT)yRu*$4P7-vyFk5daBP%ti~_7Qxzs{@xC(EnhBt5k9=b z{X?RnQO~pF`591sn|)~(9ugFLbvHMC>%Uxh;D#@%i>m&FbyZ&uE($tqCOp2qVkQ>w zUU7#>?<}VJ=JR=}L<=x!M2r!Nbr6}x?% zc5XC+dJ;1aD*k$5({#{?k3S#jQKeuWFlX36FG?VBqG&iN@n3!-FYlMZ@c|^I43v)4 zUcpcb0Dc1`Ek9o`(Np_;-~Z?&t8HJJ9aKg$h!D)U?*o$j+yo*Xd32ySF2q)fIwj^k zXx}-UFU7&@2NKL0C+?oPwef?u4TPe)*X9e<0#XAYz4KTF;;4V1vTV67mi-%3AY0}= zBn$8xg?&=)vo4Ey1cJ^E3t*+`wk{~a#IWIGvjb^wwXLD*6>O$Q2W7wXIedG0RXi4g z92J28NGkK*a&Hg{zeeb)2;y-8N`UVe(*@NOE;xP_dP5%-65JWdX&^qvq`#A_B53=V<%!bAE z4Fk|TtAT(PxL6vikBAgPu~>zdxPlq|ih;`bTsAzSAx3r)5Z#YTsaHS=+dgf-(Ui{d`o$} z^M~X@Aa#VlgQeGl>MXxn1@KWYovg~b0(ts0m_u4571F=iyh0TLtPtc5stzwT#g5z? zX^~j(_HTODW{3Sd3$-N_1!T6ZGXD40TQUT^P6^>UUvH#F40!Q5U7!VE0xoO;TL6)` zn+On2rXJ+RP_ydA8ujeXN^k%66)rr`S2AGh8BE{t{uS_iOG}FqUdQJ4FCc*CWf529 zREVv6tXj}Ke=jcoWu_46`w1UvT`FV+u7EkfCc;GsxP=j9xZH#CtaqLp2;5fhKbJHa z4@GakzsEoAOV}cgpqTTf3q3E%@47E@KjDrW1sL01_742#7h{95cis5m8te8nwfMO! z)DIwj%I*#TRe=y45a2lg;DsSVNfmn>8T0PPN1BTUvX*$CWBHPb6H!;*dRU7x}%B%nTy)@eBS|4o45*2{VGdk3702O7`kD-kU<#fjPR-#0oTEFa11Ao?d zIDat*YEOTa@A`(gS;7|mfBvu@khsdfb`nYekgLgh!P8i7!HhF9SaBSSbztzLd%L!=iYxmKrmWDm=Gax$ps475hoOro_9SuPmN+>t zOaA%FE5a8qaGYR$SIg)qnZhhcDpU`zWWAS%l+$tPEt$O$5bdN8BVchH%k9G;#AjW4 z@myCF<5^*Aub<8KhqMWZ@ml-(T)lu+z+w=fLxBtt6@sWIzJb6go0447OMzq6?;kTo z<&}vLC0g3@t7(o@JXwgDV<6+{WfdE3YhwY;>4r{c=4Res&Sl>*g_j~$yF3j6qVfn1 zck&V?z)ZfK_w947q;w(=tsg-D5txo?KUjiu6EhPhwbylz>v2zO@L~n=KpqaQ6awi( z0YJfoIQzbe-|wRj2COu8_d;&^55Wjf6k?t!g^fBDDqF!gq%qaQMhTwqQ{WqX%)hLt z6METv51voHxJ!uxUsfIhD3GKj z7Tw;m5~ddf$Z)PD;Oz7I+NUW^g#j>wialiYt%({S&$pq$V-OT+_^(-CXSCgvK`1Vl zX&j;i)AFJ6m+Gf4uHcGcm*xKjo+$;OGhzmeG*G$~tpJ1&257Ot=A$2a{HP{d7enRu zAWrd4vUm%QOMH2f5a$H)3=)}{W1^3z?2LqFuWv*x{lvZR{G6Cf=R{n45>|g%jrMPc zhw^@duz(>I=|)fAp%Q`(VbbM)1&~>k;{}<5K=9VX8nmZ^E?RI!PzBNiT;POQAQWJG z;p%)2(mi}0TwHXLSQ-a_fU%+GDPNcC;KvZJ5U)4dQn7f<+*1Vx8}0B877OfLiP{hc zT>sqN{eR`XH53l14i%3e(F$=)X)`U@kiROh9Qb-RGz z1XjDDzyTPEM8FOL><)o|0kC(n;kjM^FJK5TXa=keXbWhD1i?iRGF}ikbCLOa8?6)^ z7UtyVB2E(&!`?XTOU;?SNB{(|&Nu*O`nml7hkTOSuOP0+$iS+UC*=JieitylkF%j3 zBk?-0oY?C}s}C`CjSeIq?E&v-F(VQW_J|H|gh%@d;KeeNEU~u!{Bpazke6->FS&I? 
z0GtTGrWnpB75JV7z~n?D>=DnSeSi~;0|Wtl77%*Tpn%Epz%SA_g-|~X?2_GrK-e|~ z=*fvSf^r#cvDm)RxXwbnC_8k)Gr%DMm|%+M8p`Z!1L$jy4vgL`0NgFuG(^#;$QJGY z$AV`)ba!;>#;HQdQ>`ZzufLV0!xL0JGQx@|#0f6@PwO94Xn;fM541QyDHsV}puU=N zYwayEw8Wu;;_4}iYMS&f#O*o}OVaXlw_hu`0@5nr3iG%^7YP=ax5FQ^TFBjv{{W)m z9b!B~=+1?2e!|9~I4{$Ay$UQ12yXm&R0o-t4k2J515Qm&%wOVpD^w&Dg(iTc0h|~Q ziYS&bQxhnrs#`L?Y?rtMU@JE1?%L^Mrxo9p2ISiKqg02ko4yADC>dGtz`Oy%R(wD` z*^s32v~kF^(SlU5i2xY)1wq@WcsO|U=U2{h{j+*R1yo8;GpCjZ0Ll!Sla?`Gj8;63 z4hy^Y{9ukOfjj>AyVZ}36iuq+n_mRNMc1uY444rqe;Y4jLGcI#Gf#q~Rz}oV$|>eOAmCtdmj7;vk)v{t1pttd_wq|;;1iw#A~pAVGPVhmID(Y z>=y#_Mx*$6d2F^P@yv;dk{}{}e@|ZW`_}oq266oEMvPs zQW^<@0}A}2n$3FEd`M9TBNMfSLsdFf4V8thxvPc_Usw zP+i2hDWENn@qZWZ)>^!dk2pyX6TrsPVCU2jnU=AV4q3sW3NXk|gh=baUK z&0uen=~Y1EbSRTj%x#95pocIZ8IV8+G%`hs1>oQ$$gU}J%tQFaIiL_(+$wJrzxG$M z_)mYYiK^@Ff(GHAz7;r27yHsMSk6!8f`Ge#@u{Sp>}iy?0RZ@;WGuQp>I96?ULca< znS{YckUD!phq-0EwcGJp_r%Uu$Z&`Xhz2~)0f`bd3!#W`zT+G6oe5xi06`DG5&sFC z!LF1j!(sua{(?bhBo{#+dFhV&FE{xAV1m>dwpDfUSAS*kLn48};dnTAgU{FlZ~>qL zdb%|i|x{e94MGLErnnVAse|&RUypK>CrO_uXnnB{Mev_KrO;2T|Mv3 z#85Ex7C0rp+bkHr7Q%7y{LEgp<_zX=Ie;6uP36c8L3Aabi^W`9gMe3tAVYz$41<@! z)yo|<70m?{XcfN2qkhn*-+hIHg@haW`u(oH@9-DEEYp1M7Tm%iFxhVl|iMp||+oPs*|^cl!F`FnMcC1t&i z$IHRbcrJu>KbAT+&;Jf!>iN!YakZe9|w~=$lUa!`{K~ zAENGa-rF2VHsjeJS=5AMNke=|Dd1lM2(5jz^bCSxI$L?C9Pl|H(mQHapUrJ}9yqzn z4qGsy@QTfZC`Ar^O{fE)wA^TMA(w&&zJ8O536=Go^ZEWdFpMH$YoqO+rnZZ6>UZEI ziG(^WD&vg~C1>LFw5ZYQK@$@ELPyv?dS7k-h<0|SuP=Bm4S^Myt~~oasU(zr$Q)ns z(z^c@m1yIkZZzyjKs?}fzTR@1${9NsnMDS!d!MuBd;DX*(F9FnjT;hYR?J71m^)k? z5+s%>*#$;6`1p;Mu=@(oG$}wIkOuf7faD6SjXA~p!+eiq{D9qCp7#eU1pv~-pH9#9 zhrS=-&xEsiJ9cJ(kVG67_%LcmH{au4%E~C4>)1qrQDH)O97r0qmOofk#c@9n?Gim&+CFp-=v&x6&*Kz? zvJxRLV9rGxv>2ieRqE+nTn^h>2(%bD#IaCpACQ;)g<#k|!`vZY%9D+ZzNA0FH+pR? z>Meqn{@-rf8U;{@WKCek{OEG=^JK&S#*`AnBQ5xGb`44jR~=XI*AvF#+vf8gTp&f| z;NWHo11*XD-Nz8TnEp{IFTgl09I0NpX}2D6WeHuL$j(FrvA3I4N+2RvOH%w2DK{Eg z+U&xrmDV^VEOeY2HD3E~sMICQ=(?{6&13 z;V^((Z!de%-|6cC4uf`Lz9A;p=G5q{3y5c{`&(X!3oxmM>Wyn4wyOEFOq2%0&Hs>^ zJp>KtcLZYk2UeVlJ(33)AZS~1B>{Dkae;S>@n619{^$6 zV;IF<9u4@rTiL~nFp~9(99~x3ZJihpfP&nDhBC9fP;o)KvAV?p#T>pB1mU58E&w8d zb|iyr%monR+;Ud;(l97O4#A%ZaGO3)lonHr*@FSTIU-yxF7oesKP{I1ROmYkOfbLU zQU(RP?(XP{U#8bZdHhci_r5dqDvq6g-z4*w5#Z{g0XkW@`jZwA=U%XxURbIs5+vsJ-}Q6HDUKTqBsZ ziP2Dqh^MvP@lk&7^LhLe7lJ9hj$!Ws4=uU}z%m^mx`E)e!~7l*ilyHsuj`(2X#qd- z76jl?JKi+kzSg)7kkK$4EAn6=aCqThD#i~1)d*2u2idr|z8VH2vt$|q8A4{Oc6&K& zoB#-15IOl3zb7zuY+{^<*LoffuPOK$%}Im#z`cdaI+-a{;LNx$CUYZzS!L5Fvmhc@ zr4Z|WD{g~2AE?M5!Rr?1>k-VKOi^Lt~%BF)ZjXb}OKsKwkUD^O0%E-6DSjseE+ zQ)InCd>Vto17m=M4-3EQ0JJ6Wafg0?oZ)NNcQ9K;7&0Bk+L9v~`cGtVTgvJD;F0ScMOV%a#;Zza3yYWrj zPM4Hyn7wKMP=-%1Ehz5c9>DL?z#uhu>lW`VCnFyh_GWo@Walj$K zgbxOV1nU)J$tBNT_|sMpU^gM93BASD<+ER<0n2x8$Q&h@9s*Ju;>K7E3gJ|l%c72^vPUwK?f$&cOITYPN<1q4ThD!zx;BN1^AmR^7nI6McT zAlRTtETIrrDJw#&EO<*^Pq zUJ`^3Mb|7<@|SV&fEz-=(c$0?Kpn=MxR9;tt1?!Yn*B|p-w`m&cl^?3bVP6jpbE{x z?gelv>6v9*EGDAq)nj3TuF(AigC)z^-|{9eNID?-x;>NKNY@9*j>e)+XN<4+8gjYT$DY{Is_ zS|e-A>*y7U2SfzN2YhI}pH-G$z~~D{4hO+uimIFKd!^;`3JnVa0E81B#ci|AxIe_5 z?0CQ#U?Kq??ko0zh!X%Jz$U>i`DxOGy`q(o88i_X(|wJ>vEwa!ctjv2{7)D`>hgw{ zWqiB)7YT|QQoK2by7}I}3Y7*FyijLg{Af&SD3R+v4H8*|)y^gYOiyxH6R*sY+M(U# zoEdAV^N@9NjR;-Onnt3Yejpp|<6elD`^+NJfVh&1fwZRM9y%XqP|)d=>(6rxq5cy) zWsBlPFj-Y%ONb~Dr_OG<{;y}hJaIy*mg;>f%+NmbclJ2v4l8~xmj8->)4&pt0uLKa zzV$bV{c6&tsvtp-;frG5rU?nU#ZeClo%#;DOfs!9a3T{Ld97(pSs5(*K{Saz70ibK;t#RYS zW5x;y5qE63w*#ltSK*~$F=GQH@(kkxV$P8N{aV=&z%1%b2A2UN5Et!jVMNMH{WSF0 zU4XY>MTB2Sm`t3IxLVQhrvKWFJwI1L8)w(gjZx?xUZ|iVlsO#);xh*H5htAL@Y80F z?k(XNB1e4I8|!Q3`#FX5@q-Ma{dxm%G5}?OaS)gErc?BcA~S-QYM!^v0^k}2LPhJS 
zo+OZfbBa*3ljYL@M_?TQOmHFrsDuSzbOWf+`aV^t9D-Pa4t1~&f4OJ+J&_){z;xUC zER^){0hkNl^L-hylDLsN@Ok%JLclQ~bhpwd0F6b=qcTwfW<4eZJPqYno-^pUK2ZP^ zJ_b;&6qJ`zV|KxOE)@hf{g_-e{t$v%*V_)qiUl3r`*?IC?)U4#5_bQVwbB<4f~|C& z#cuOsW}qB=dju)Tnx%QG1OS~8Vnaf+`Oc1}Y9;=o;uFu|d?mm^+a@BMvEy`%nkC14wn-!Jv^>?5GH3 zW_V>VQlo-UgF?OCbLxN;*~1=6A}gdw^qWbG^BzT!wQ4 zN;+!SUfx-bliTG&1|J5kGmGrP0srr`ABizH$LXT@e zT0q%^0x^D#1JE=N#qwG_wBcAtq{e5*6Jk20^S zTmruInET-A{`cXA?)Qo$cS0A%L9i9Uh#ditIT)`GN^dYx{R(^2rNK6ZnH~rZV-ZLp z6ebu}LxysGD&l}FO(dP1cjVv(KxaVu)@mYz*hCOSC@RBx(kdmNXu-T&?ci*UGHHMQ z!g8-qC%K(-_=U^ifJ^}bBP^o~6P2kU$nqc|`PAGFDT~r~AP~t5Av~0DYNPK6WN~ zsQU2xtM@<4wh0G-IDi)6_c0&@HU#Vv9~M_fM0#_7F&9Rm=K0O9{yRAF*p84tp~FL)EsHRJ zV$s~Wm*oH{*jzUav52G;4+pjIB59?^oaNq~MS5vOnM?=-UJOw~A{Puz11$|xKr{~b z$hww9I3FQa9?JlgKx)7FXVyEN)yy7V;P?}iR^C?BaRR8v1cGrVDFzGQ1PkdE$qQ0I z#B|OcGj{R3$OhX8R6vMZ8SK!OWWD}Gn`D7US}`+2$|=$>M97@{i+9G=Jm`$RQ^^F#}v*gWJPE4VTON(Dkxc*r*d17^bO;9E6i1BrTa_c+nSk(RI! zIT1XPXRy~rX*z#S5O3Z+t3D?jC_|Ngj=ydRM+g1y@OJ3$^~?BpaA*+}XwAd%ZiGqhm7YDvC z{NLvnCjfX3DRS5T4g~&RflL}%{9D44F|SYFW)iF+tVb&+1xtJFto|m1bg z$(=g$)0U_Z3Jrk{ET<^ag~ntS+x`Yh(Eb=9|4#JVrI5FJW4EvxKo_8+2r{D9zF(cC z3Jm4~Ff=Xroiaf3NA~8E2dRCy+wueZRdEv+0pI1j$Nxw~9mxUTXyB{)oxfH&W&0Eu z7Ys1DCT3^J!Vz#AOkd+jvoZo9)nirFo3uXPvlV`!{KOl0>p<)bNG~=Qcpu#__#A`5 zknq-a_S3mOp1KhVpXiGLVB`6)+86?+f*3n zKtbsQ^wXulpeVFEQ+*6QK74l`sCd#>!2nPbR0(i6!4ZW9C^ajBk2n)Kd=9WWKLjKc z7^BgICT0VT09-&XIC+YufL6C)vuGSyDg6f|My^z)9b7pe>N<=t)`%8PCk@x2He4tr znfuEDiVt_(%{?0^!9sxqL*KzvBfZo2;}i?I9PHjsQjf@8?|$$0Y0NYV^wM_$e{=o3 zz4~LcuMPSzEz2EBuS{~?W-4ooVwGt`y%BC?L<&&x`|ZL1Re^Q6WbxX*^BSdsw5O(> z@AiSGvk#XLw`rDXMezw zeEz^Tkcn0?6*79u@61sV!H=6oy930OM>)o}y(*mwikNnv!vewo6nLP;7&`Dh{y@Oe ze+RxNzEbDE=Jvd+Y9U||5a40ocm1#P1d2=pgIdkwby}OJ*gya()swe94Y>$W9URFD zdZ*$9nUOXN`=;l*$BXuAQ~f51krm0h`*2H4JJ*}#`vUk-AaVXjR?E4P)Q}Z(jAbrr z@ywpjjd9;)SMLg@YWVdkV6PN&DTJ89_Gj*Gzg)F}Gs*t0jZl5V94QvDYT-S8*ktvHt6=A3Ghspn;Ru1N~FLb z9v0(?8;0V8$|q*;?&)SckDl(h1ct3-#4ca_2GABwaS0H1;h2%+uAni1k|c0E2gBgV z2|z*xgOIEN$nDi96^eiXtdDjxq{#h{W&ZR3yUY z7#18H0bmmgWA5RUpMmO3|7Jr@h=`xRQ*Yj%tiA-0JB&a_3;}Ec0t(#e9CpE+WUXy^ z8nCc9OoRA;K#G;=Q~&@A;UVW7202h4d54?x2=N_=_5{$_!=e`CymL z%$MH3%eR-BPyDd=OXiCfUl`4O)Di^J^|qYVSn=>PAX=B+Z?NApf90)g{r84F*WmU~ z4cd$wOTaqpO)Vdi2nK?V-Dtu7KB8OLAi=ID&iDHX1bf%Mw-&_$tG@m2@3_77_#xkW z68;9V;g5Wx@9T>f*_4)2MJ2#c3+ADznuer9;yg!)@TdR4>6E|ayGxvon2?Dt=nDQ@ zt9Y*ms977<1KSCZ8myIayvwx(U`+*}Lk)p^;A{lIum<2P6TF}?kl+AAeQHD)fsuW1 z5COR9XCD|UZGVs0Ame}vV1f7paf{$Je@+hHw-8ou{CkWL@L2N9xxfBAE6^}kB6ARL(mmu$+ z3SztU-`275X=RafKiP%>1Zb~7`c)PP#ti9DKHaf(65yvY)}GKJD?ulmjbJw5Sk(F= zrTcf^;gP5;vkU|O&`2(U??C<|#CU>uL}-ZHC=Zi81o-YY+{b0XgYqxvEW#9r3)EV+ zHoE8HLx5liS8yBwIh3eso&Xpe2Gkk7jG3zvB6|siM1?@%_&Oe{%!xCHx5Mj-i*nk6 zh%x~h3I>`X0;o`!^_RfYo*swb!K4DsaX4k5Q$<6CZUv4=Zlpp}F`!U9H8QcY^D#&4 zY0m({p$Ph22*`6y_YOj>fV${8b^NKO11Uvameo+2$0u5;PCLkbrW}MG)|dshPb&aO z?STDJ4nhx`|0j3Yj}ykgzy_0m_Ve<fNeOdxeN?{aqlqxx{w^-MlNx?k_Wt_l&N9#%^$%krB$~S`pMaSH9w> zf!8i^+I21F0vL!E;v^C@g10+nDv@Q_t-1iU^A9sS*Rb3hT;*#CEBYPF*j1fW24xb<~aU*A=+5U^pZ_+WNybpE;t zJM(XWJq$GX=2@q{vew-N1O*(!Fq$@!WVOMd5f_^qe6_FaqDi&pjSvxr1q8+D6#4K_ z8P59vP{IQ6SwcaEDV%kw30=i)<^e(}egP4M14+?+(w@U};Uq>cD&gG~$QOrmG zO+7U0aF3Z5atp7_7l9D~+9e`D;NClRX`179@&*8xE5X9i#<71FK+jAUexB`#TRjaRqk4#ZRXx>(#+AK~Ay`D9JNn2fGAPCx zw1Pih{9icBJXFO`#qm=RlEqA{i;n#d-V(Q&9^sHcBC*m*G3bI{ku3R1D%b z)p?WE|dj07=*{d6i^g_^!+F?#VqnF*+rnyd20?0FaF%!f50*~ zt#Md~1#})?&1SzYJc{xCdrl$uz4Q+N%yNY~tyN)}JQ$G2g*%+8RfqZ9BtLXYfsEo< z6T+k?S)gBs@IYJfZky}@Ab=$HC>5^QIeQQT#DS-asvb&gzG&B)6YIU%^r~ml)!&ncd%YK)1 z0RDhd0=q-j4OPt1#4koD0HQ{mQB`p+;EzD`4rd`)MJQ^+o(0h*Ajw&c9^b>mM73OA 
zFS;=Wd*7DtG%T`5oNu@Z8lWinibrjxYd_uYt@r;I#I_pfKd?F=ix3-N*fm{o3zo6$ zSy5U{eFK2jB#3>xq2O)-wT;h=`>OcuM1+m+F~Cv+`lf_LEXNbza$zZUOiWZb2{}?r zCGdUn@OSanRq!hUY8>#R2j7Z4Xdms?U@yiWVK=82QF;)*8}vBK(>vH;;l&~S9qK|w zv>nHNN!Jz=LI}b3zNBmBIQ(PAg_Uh|4~;eZ5cFu@)_9G9;}51!9(iOlUkfi{!AztS zaE1w(1Ia(@nZ7x>AR_FvGlMf?2 z49IBDOExEV#oBJ3&6`xK^~^ukKZQgbgVMLleB;nk<%EU5&@>KgV1W4j2j2khDD>`$#CnH_`G*rU9W$#Q3GhFpZp-BY+A_QtR-{Bz#Ff!o0PW|qRlV1Q~x z2deW4ugU`r4K5r^lMSH1b@;)=)S0ASl2Bs$?1UBO_m)!CC1ZolqC|xWZvvH6!Op#x zupgl-ZR<7x`hA6pI5>|mI`)Olf3XUjWu_nh=p_dwX1sfGd72^V_K-c|fy4V<<*Yvq zFm-%AxWE4C<+P7Dq$HxksKyGx1`uLrD#dFI_`BI~L!O8L8L$mf_z;-4ttaUe+maR5jM@Jj@oEO!yW7&Q0ChY1Z#p;~ME>%r%R_>5J=5;C(wf571&tosgF z_%s&~7Y<&6aK@5sCQDzuzEJtYKP}i=7L1tA6%B-22p8$K7~2`U zdi;8LSaE*EK-!dQW&s1E@e&YNJp=NtJO2HH^-!NK{J)369uDH7u;(3Ow!RB8W_uK7 z|6;Vl;G80PJI1P_^fRbp_dFPaRYyB@`^Xbk{An=Xk$NF^ek~=!ei65yB~}rVk6ceJ z?|^4Oy0~f%1iS@O9-7A?XXG7^j`xs1zJ1p!es)L%h5Arq&R(>L?gLV3;>LzkmP$2k9Z`AO^Wa zN5zQp^as+yAiy$W8H9H2=(61^0f`1d5q{uFn-^C5<7}&Ei2qd_b>fN|yH*KCHEDd2-nYiGB#ds2Uo(xqrM?! z|9AI;GZMaMUD>w4ax3naCVPH7r(6G3LLU+0JV)aAab^|Op8y&FB9W`ws)9GW`bg!Q z0KWjB2XSS~WxTdqh<*UT*uD&#LJXS{TBqvD&5!f@5*q>9 zIcR?V9u@v1U&L9n%1Q(90z!@v9Ay`y@EE4h1e{Q@;?{0#V9ra+cmWwe6H(BE0I~$M zA{K6~QL_iyekN(Wu@6)F)2YhmAhkR|%1)tmK2GA5{1;r^E zNp)@r0^u^hzpx1L#KtPkX2y%v-%48Mv0P!;q}Jc;R+9hV9$RnRfz^3>yNN3<24Ozk`Wr9BStu9n2&~&@fVQ zWg;O$Y^cF_-YBYgx4VoZQM!uLFZwcf;Jh4sCx2eV4#8 zKp5~GU=e}QaG{05!S+FazE}Xv$5+c6dei(OZwf1Y@6QRHpU^(d50`{WQ{ZuduY+JO zcoXU*4p^1S_9B?Tz&$sK83+De-o{^y3UOfyJZ^vLwJ1&`j34Nop-TOIA!0cQ>31C3 zeg4+coF_o}{IsZ>8>h!XCUDM<@z-E~7@eQd38n3D-;dGV=U$n;a3vgZfyDpc!NvT< zK#&kfA#y##l*q*xHuDEoJ1ho%C;Z4voE{$TynzQc#uprAu?}J!Kf_9oFM>s0y?X24 zQVu5QYXm}qpg9|YX?wXJ&MnX1q6h&t!wR~tgeFVzLcqfd>2Ak^(ctt-5CkCv5`z8# zN6N1S!e}5cL>W>PLqFzcDLjM`YtgnJSbSZ+hu3HJ{NFCSg~9ks2MT`;`^O>>R}^Br zAs%cl-uvac2to%4og^#_04W6t%7oCMKMj;~!((yJJ2tvGj}Tr$h1SzT;`pZVSN@ux ziye7;-}F8@VSL<7bj7V%fQj7!2g8M~W_-Y6ND=BWq?lTnpr1+?#tOmJ;Ob%yudQs( z#~z-32l`}7)uwervp}2%;xhj#+G4gH0t*p@3FM>Ct!RgrK30o~KM!*WvTxx$>{Ki; zj1z)m)}r)z3`%D#1adjzxjk%TuK0e@i+sZ zzcWeVj|JWC;nGI=KR>*%7&u?{$K}NsFeqSfrz{lmzA6eGRwT?#tAXC7mx|^O|4TMq zIuJWD?qnXlyL=DoxkRS#^YL-Sf0~IpB|eaoP4KYY+4eMRm5Jmdi^uo?00QzM=_CfY zu@FM>9L3uA7V%L3VAOFSGfb=c=@GLxJaB1$GeByv#)fX&c2)`yZt)}BmmM_zQ9b{v ziR;pJ>3C3wVS+8&+VqEFIn{F&d*zQ+|1J=+Kq&lfaqIkEZ27pHD!mf57)9Y93#E!M z^Ixu$cB)J}C;AF2fQ-q(KxaszTe}t4qI(QK5&o)zj55Fb35BqjQ5c4i9wHcoo(!Xd zG8)&|OliOL7*t{Z{=>nL(=GUq5e!Q*nQIk~|J78{0g!ei4#b!`VC+c5z6Yp|Y&K@Y zWI+QHH(4KauZsUw43#D3Tu-5aQHX?Sr`$=W`|S+d|3P4p>EoK&O&8+!7y1e) zYyAjNhY$#NdcTH;RdZuY)kJVbaCfiY84u^$nry`TCl%Dz@d*W6C*`kRoM3>R`<_OS z#cv@^iOxuB>UK}L~O2@@;{l>y%hVTz| z3u!magS1ce%Kfm5e>E$N-K*cfV9kbl)qnIDQ}Dn>9zWv&-Ba)V3HAt zk=_V#pj_`66aE>8C*ZedAT|D93QQ(Go$3VHgO-n65gk`1dMEr9q3vHo8?3g`yxRr) z&mKT!PKfAFw!|=}HEuWU^2Sq7zr?s^!Ju0ua-n&6EUq20MtPs^F6Y_HbERPQ3Q{0A6NZR(a<`{TD_;v4wMY$H^{Dci6X-aBs~v!n9- zTX(zo$xLGosyvF?j#G563~C5-LYij20iNa8uE(zb!7qEg{q;J1d+XBcE}w1a(P2m> zZ(e)cV#mLx6n|tzpXCBfA+ooP)AH;vEocu&8 z1w5~xxsNa=CmL3?H|;6TDen6`zJHs+=w?h0rg(3q&)509+aHmYpqQ+u0dR% zKf+`DBH_P%AH;dREWVgRnpEOB9$lg2iRQud*yCclUp@GAA%D!+sd)TDD+jT@G+$uY zJRD*`>`JvSVAwu1UnLRf$xCs<1@%YJIt73V;X+sX24*xoR_^!Z`1#Kdj~{#STw#AN zMh>0@fItUE2^hF!E0L0b14iH=eU1EHBsrL9U_(=!2>U*!D#lN#Xs0EraF@aA=97v%<9dJfuUa3n* zypHc5)MEV9hx~SrMuGYtKY$cGV2FtZeV=pyQ1F0>t#e;NuzbV+{RFwlJ8Ad^yikhw zXUpB7h&Ks@!XsnIjUmFO^yuvavjSul=IR+j#xAuNQ*!<yu-{7)l zrv7e1AeX1Wb!WrWwQsxqp#n2t^Cz!t>9p|(o+hERYzjG2qXSA`umxTMhyfA+q=2Og z8-V}2yHI1fvDyNjSfTYjWr3v`pdsuHDX9=f*mz}%g>qluMSu+dmn{zQHLTD!xKz}O zcu~8mmHE1H^sZwuieGJFlH9pV2cN<3jjEY{?wG_em-~ay3xY=p)u4W;>kx!tL{z`q 
zZ*9JSdkF#rXGV`l#Q0N$ny6Eo9IoTXNM0I#m<8xQNr2^BDxrVOxY`3CB><2GL0Y0b z2Ti1Zr_&H{M>}x%utB$1ilN+RIP2;A1fqhc%lI@2j*kuk9N&X8#ofk27!f6k;v6n} zPkp>kuuC0 zNSS7lyaAQ~A&{XsIEjHT4k*24rv{af(^PqE9b8?jn$0d@U=SeyNJmsV5NhI>Jbd1` zvMyXPSPn`hl9TQEQnn0AAEp~&G65)oK%kEuY7{ulbPTHH`MgEL)c;IoA^3w4D-pP{ zY9;GR5BXUe(?dY0prg1dqVnGedf+Z$@6u)a<{$(oI3-X<-MwCSg>-{lMFk7{Ox>7S!zv zZX`qTPG7=B2{|L~_6P?9Fj6I#KC58tHi9Lus!)k##TG7(Ma)GcO*%mi#qhinZirBq z|L9CDgu+Tl2lU=+nr~VrIqegR9eksD6y??p{`LKs%o(K;BEpuYQbrPhk!O74JtJsG zk%E7Aesp0|2Wu3(gqOtmP)wtO%)yr2aX#gY)e%HlN=(??OYhe*BC6P1oU7$1;k@v~ z1LAj=;BadwJ8{GXz|OA=zxk4;@w=G@bkPM>^9u&8YKKQ|YBV$PzyM$TLxZ5IWH@1$ zUSqR3-Q0sPUedj!NUVH7@%vcUwI%6RBn7BIDXvP&`Ynt=62KGTfWJ3_5;;&a{PRFZ z`g9J!z5#s!UXWjzqD_us`P2RZs-kBEOk8|()%0Js9+Q>*p&V-%t^ zm|Fmcze79s-|;t}>--A%X9@s3pa1lCH9`*$zY)du+KD}>*;7q|DE|c@Uw1?$Ibr;2c z|Lw|?uz`gkO^O#yjX6wM8W{V)bR-pKgB4Rcetr(B+#H0*ba=jP{XDOf8vyhHDrjtA z1;hr>#vOZGwzcoCn-T&L-|YzW{XPEgz%W1*K|<@rVzEaz3~dOAj3?V@QEmqwp?(9r z7CYZQC=Mfa)$;TggZbdj83*8#V}Y#NubU`DIf!+yJBj7nzWK0xwr{ZZ<-*#Af*cou zH+Sp-KY)lNg0`~&2LJjNbi?2VTje93h9K3CDB=t?fci2L==o_)9~Sy(QBQt-@X-of zxLm@ha_B9XNcN|tD^9Lisn}JBp?Y5}P)-y$DuPRgKxhY+&O$y-xgYb;w8{i3J5m~T z+O>b0xR9`oc%~N*ywjq*K<=Q69mO8z&lE91(R2J&<}Q^Gs_U%!w*DJJ;1uN2!CVB4VkDb#d-`|%bHgV;6? zsvf^nF$QPxVE-Ou4WDQBe2z=@ieW?O9}D5VvvY0utC;AA%f9Ydvjqh0XhC73ks+Zl zP%VJ$%!-7m5|_}@r(fDKXsxW@A_{u=9dfB_nZVBw&E86m*oa?L>|R zM`R9_CYS6Vfa)s|Vc8URPsieSer-#B$bm`3U^EXYC=ak^!s5hRmrDK>f*bazXPsL` zdn8zRK$Z-s;|CJtP^lP+b9N2zENhjfqd5kJQ3Q`;N(n|9{7f_U0c>S=ExQVMd>E7@ z;Q?!=4#tpJc^4*;q8@59tXt0i`mTSixlt(+{;61{wuKD@LM3BuVH!GfP+~F}eb6jl zgI^w<1c9J_9$WQ_VPd@};XcP<%XS5T7l3vFFu_q6m|h44eVk^|rjkpETSP+2u)Ix= z_WonQ#X{(qGXQ*rBp1j)u|VrJ^`qDO&EfIHh7dUyfwEs4a@%( zd5tjPfnf^zRY@x!a8K&kzb*=3kKd{Zfp{nIH`qeT$$TB}*aZ+i!62bRMibYrC*b=X z4p)3|Soa(c48|Lu=au|I!_V-*E*x@Dat(cxwg0|{%V+m?Sm@``AV(NJ!q7`IvEufB51+?*z{Ok1`oWHMocIoq9w2Cc~~8Gc*F=M z7>Mbrs<=3UIJ&B)NWKsSz>owmOc)0N_^U9$bKIjKjqdHEpNzhdf#Fm%5GIH2PR=(Q zl;=Fqz%BlT2&b8Ii!nAk-VOl35f?qa_rLLkfl^(R$hRhPLdu4aNy`}YP2bqq!VLC# z=s)Lw-Q`{}1Y+WM0gqVmMvGwvh@i0Ou?fX0UB1MwSmmIgd4`2K%B<{L;0h1|#v+3) z_)WdOSHLFWQ4r=92t>%s?e{z8gve0k=n_FBXjoBz3p|Ym2<4xPRF?8f$>_B}?giL& zg#oLCgal2O048`=fw~wQb`!yoi3{V^wC0x{EGZ190{|vJYyl>A?*0%sErSKFI1iRF zU##jYo9X|c{{!WA!r*7!0kuYfUd9ot;;X2*rWqYL#g*O+biq;SHXr=bFhUw0pLcDr z+h18Dh!7eMEtrvI46(cYzn1Zca>b9wlHPf1Zv2nVLc8z)01N9O>nsJiqU9fj^Rs0)`3Bw-eH_`v9nqZ-{+kOxPWV7p` z$ge^cU$EGI+KY5G!8=U#0kH`EPP|Z_3u&T9@;^nUkS0% zm3&<24h}IOafx4I#CVSp;ZNzO|Kiwyg9gFu9S7LNiXeT);(A=SlqM)ciwF>4QTKq+ z*7l&|kwS~w8Cyp0sAy|uVgFtgg+ojI_jP%<{(oWM*dCiJEPTF3B2ozWj{&rU;2_8& z0gDHL{?O77Hnkay7Z@DQj*!RH{HgUmiAA4$b%KO8S332#d9PAWqvfEv+DgU|7A;yk7zD544{rcqGNm*)gCliotCBYUy9sFKaO^W zUkw7;Ubrrl>RIsu{C5>}KP$c8_<@`q$l&fr(2r-cXMpjP$3ZMr713&?`0M-o?+9bFb+)T*ivlho86;F)Tes`}5vc$VjMwRKjQ% zYtMXHe+mhhEd@3A>i=jyG#s#{r@Tyy&v>Tq?7tj56R{%@PS&;Vadt??Y- zz$k{+ntw2yYMLmbyo%mfIERV=1V4TodwLsN#)&AdD?jX5-p?|+|0c!wA+RPsUoOG= z_cmOcC947zumYL^5x@!(3>;~<2NL62W7f5U-wPPaY)d-T|4*12RTZElR?$M?Ox8Dg zBjw1k8ewIJC=Q_TvX;_2rM70NQO#UJlwu~_PSU6jL@s<-Wq|o%fx%v*j)uudBS7<3n3UGL0?2toQ>tY29WMbIC=e6OYof&f7T zCPLVTpW50|i?F_)=oQ768EV2<*N^AAA4Y%p{e^fv;Oi&9#}y2Hq8`=zS_-caz=}DeTkJIndi#JaP-r{7YNztvqU{sBIutIqd|i6gv;4>q-t_7)kZeA? zL|}jtyDzyt<=3S-Xjvck?35`N&^?31IL;#CJ;Yo`4P|TJ-BuDz8wcoki5EI4bZGp8 zWFy$Q;v?cC;yMOcyg~A=VWq|tgo2U>_LH#R+ZZ!^Tug+nP>MSGWq{zkGPI&=_h_+! 
zg)RY4EX9W}r@H{G04Sii1~d?2edm^k@^CZ<(RO3~Ila6D!BGg8Sce|w)d{Bh8+Fdmvr1J&;@gRY%ch0y{^f%If2k>On4?|@T?^j;pv<&O^^a5N8;LKntI30Fx#(!1ozJSsT0A>Ky z1ucEzz1(d84B0xhV&7?9-YJUPfN)*h2}E!%`0VSY;66hKR!HR=satw6JyjiI6Pum{Ck?Ij~??U!Z@V56KBWRy?so5enxV)wYwK>dCGZ~wp%@mt;9_r75qei6YZ zr1-9T>h-ybnW=7}D8L?|I{+vkJOC`EEanR#XJroT1$-@seN-r%aL^#)`a{L^0xaxzz!j>P__&ZEX@|o9W&8A1RofK)jSM8e6j->R0eO_)so(;TAVbAq;6H)DNM#KG zyG#fqy&&awSa2o&9N)!WfDvE^^Z^!te*?scumkr?2bkKk20RW-a#~-_mGm6x_JsGK z2%rTg4w0CV7qthsOC$_lVi)JQaEM|zDx|2k6gDwL{2jggC;QCy6u0%zw7kertg2;EDvsEpdh5T(6Jhta zOOh=6-+u_*{_gJmG;8HGW+<@%HGYqwVgkZhQSzds8aIA{5uobuXl*%LqbzJOY%K8@ zUL0VrLF&BMFJb6%L2#IK!80Ln9!%wf+GUe$Wt6@CphJsxyHK0|Ql(%=k%0yT31;Iu zgrEerSEwa$jd1t?78o!Ef)Iasn3N;u;CR@T;BDqWPxL5k+oAIDcpgViS!?j1M95Sw z?ch3`w9lp!7p3duBEP8PzrSJkp!NU&44EP9Fa^5i1Udqe6=mfzK>-ujzP+I;2iM`1 zZXy`)Y6wFx3m+VS#!K zdZOP-$j1hZvgTh128A-@D^gDT_Nkf0h{I+K&HeC68FtX(&Qdvn9lqrGUW&hjTZ$bs= z!0)_=62x%pnJj3CaaC~uMT-n<`#*nr^msQx9GUd3NjOsLHZO4L~VC> zHJqaX2!|r(wasj>DD+o=+aKo~>y2aCY*&fP0;*w#eqc0(D3qy4E<1O$)R4Bn93R>2 z33+@Ght5#UaSVr56HWJ{nG4|cnmgGgXgrN<_=G4EwB2o`qE!d>+cKb5Q} zD~puGkZ4zSK#Y38F#IG8gS@FoqUdzc=DM z!~gHXTq|n<^rrv+ho!qnDPVm==b-PJ+kzZG;5$J@O99K^TwoI*$kAYoKv3I?-1wDX z`uFFn@*tU3EAIXX-u~i6@F0R0e{hg54UAGExI+EL%Fa>B@4q7{H{&HZ)NlO;d;A8V z3UOh8INLz(N3>EccvMzz@&5{jKr{nS1I$z!0BDsxVt_mA-<`Cd?+PJdjcAXf_61fY z3++JrYSJtv4ukwzBDgr5qLma>!Z6Mp{jeO@Ok2f!r3Y_pxA53(UihyR?8IV4a^*2b zR=9&%R}&CN{`(kTnNh$0*dRXEz)^|^Qr+5WXM9TC3EDx(3%M-o*4Um`NdB^V90 zOk%{os;g9=d)pKWLBl9p-uP^VTfY^3hV*(2#Bm-K+rpNvBH6HD$BlN z01;DUiGIsFw3ZF!agBclp=A12=acZjpejT2N8N1pxuCXj@4iO7!3(1 zIam%#M}zcJ+NRQU2OoeJ2nES&UI5Yn`We?i&cA%WB#=fdprDQ_;D0GQ^v_cfGbtUX zjIlTHJM%*T@cRi)L>==vA2Y}8#bCwHaBy#^IWjLLBQFOPm=Q`jV(YDbCd$SE_yI}@ z$nVy!dJBhp#dlv#J|PAV(cQca6L4`B1Cr_POXN#vo?39EH7uMf7!d&t<c2t)+#k`4ez-+*kcz?bwUnPTc#MC( zRMG_F!-5((T&4(s-hkKyB^q_6z$t#y={HJo@3Gbi?xiZm(22fjE)tjhybQp!Yj;Re z<*!bk7d42Q-N?|1t3nj^i+#v+zb>9E)bbi7|D z3gJhv$1~6CT^$B-x^gmJd%92t`GCR}A6iFD2e2EnY0fq$316od= z%6iL8WuwJ&Bel0ZAvRDAxY&VOXQD4_~@z&4_=2g#zXW69m2wd>4v-PC3w~ zY@I+s1Wpx0l|j|{;o_G!Soj%-@q^VHu3%M+F?BGC8Z){?B*IhKiSqL=&Hip40bMeU z>jxA?#C#p`6a1kf#R*rIt}#wmE-&`lmDFYc+~w1qS&A zSHauusJd7k{4>l%7ys-L25z14Ofa|xo(cEw0mZbV>;sJ)!DLa6BJoU9;*I$2mhnKt z!Qc)oUMsbYd(Jq#c#fRxGoR0{(2bX@igu5tBM;fpa1Zav5lz)vF+U0~kMR_W;9uCh zKD>7Rq9QU!fNCvyoq@nD9#_!{I3tzufiM`57?Cg-VKC@mCl%UB+~&V-g-Y=N@W@rY z0MCGqhA9&%MQK?7ZMm%s8xX)Cu$PZihhNY0&glWl7X?MH7Yl`qusAvs7~jn+7mx?! 
zEYrmX+wcdrC$~F;vqez3)&mz2m+iNxB}IlPV+@cPVM7!(9#Q1#fFghhmkO*O{IGwu zgTA4kb1EvJ7&cRtK$8Gq}4@Y$bP)aKPz6fDJ&8EfPos1p%h$rbUL9lc1an=RNgB+-E z2!Oa0BiO}Zw~G{i7kR~Z<-fS}&^VKE#Q3L}m7^6`!of0z@PCEN z%tLr650>OFZ6x4>HvNRb^^ib+!m@`rzlR!y!1&zY^sDvDuf%w^uy6lS5Iapa3E;VW zz5-eW=x_l(fL}lpDA33b@(7r>Z%pcFD|c&H2E6?_@qML1^xAf_)iFwflm^kEInd10 zI*v4md%(a3G#m+6?Kh~fz`Dw+s^AbH1wI*ywDJ#L{=g?-3OL}5O5otPgM!`;3Vfi% z!AHJ!SI!}PmGV|De zK!v4uFWHjvpDjDWs!fUimFN`@89N6+j1Yws6}EF;~DO3r7CARaJ-v)+hj_ApR(6`Vpn+B~^s}#Zn&M z$wk2OPu?PDUb=tu{>(pxb?8o9;a|H?&Hu=Rg$y?UB!D|_0ARqZ{)Zlct^y4s_pRUn zFU5cXKv|iP#5gHStxox%VV(l= z|4{kz|M({aJ>2QRn31u0$?pC4bD=`?{NM5ZUHpN|`W~Se zHx_O)IoQ+$bMSHz*DdDp75=IaI`Ibu35L3F_r7!J!VEy3Q&`r2Q<6{g)`C#D<9WZA zhO06{;o(rnysCM)8`wMcQoDJi3r%D8`s9J&BVS!K*GEa7Etx45JuTcpMVFFyNH5 z^i4?DM8B>yM#1bG2g`01X+|`=9jD_24>1J99K;eUsSnvK8wbRAk3uLgEHOiy6Kl~S z1BndKd<%+KEFUnw8l}?JoRoyJw$QC87g1k6-c-62xnct69-K+Uh!Hy3*Dn zIk0RV!LWZZ%umpb1JFO>A&jtnHSzHw@h+;wLGdAA^bG^(IlWXfutqQ-IDaatNyd#E zB`E%3m6#OfsNsSwcHvl2KdSeo-!1b3P^m=NA`Ak@Qvy)Zr(ONJ5-h|Sr-Qyr*Y?mw zxA-J^wS_yEj@hH%;2%I2!We0%ACi1v^Zzja{~&s)qv7WN`#}XW(gkxS^?BUH&O##5ty;2BP64p4#_lfg?lT3Z+&H|r z^YrNd+vbFGg|S7Na03TokGJIAt@e0jf~G5Xx2EE~{DTQmT2|gGe}tz*t$C7IPC^eS zZ=>cV*@7I|;sY!hi5!#37?FuQlgUJRhx`A8pzV+xRm)v@zyIOsNfd~2mZ0J?5}151 z-_r|d{VXq6-qvH9SlmFIh25Z@3JtxLh3G`5!d>CS9sNb6r~JIV*)^f8P_Uy&D^uUb z{iO0@yY~>mYgPC60!hb_R~g~%l~lffz6wVV6zV|T*{>~OpypHra84IE!*L+~A58rJ zdd$oxNu~aWEn!u`BFoi-CEhG)upP4)WXgHJ|KNgK^JAnt#xagOyd7juKvPR_9b#%g zq@}Dh^Dqroaio<}Ch&D#icGd$Z;!#vn`QB{j2LqSf(RUy!;<_Q_lGtI!t?CHZ!V;i zq6bJJ3@&3H-i{Ms`bu!7k&Cx*&e8GSIhSKfB--;X0zEQ2pgqKmPYa59xvC;&5~GRc#R=J z(+&h7LOr$l%l9oxXYZH7V;%}*9>k!I{EEoacVW7{LM5e?FzJD2@-%w#}L_Z)j)hi?D1M`&G=f z-YTzHKH}{#byzstReP)ei9rE;Kbn@F%-1*C9?cdI{Ak8pW<1_x*ubkzjdWc(H}B1o z4y6?!7f687O6_JBZYqT)An${lBG5f?0OZY5OssI>Ck1U+7Y^>}Cf*^lIElr0t+KvV zfl+`GmT!2STqYQJqz!RQx}W-2e51$y#n|nf>(mT6`>yq zzJ9^5a&m6PH3I=Oi%6tBA+(xVAANEK5Mkrqn1v#>(GrrL z0qo|`EODUbMM}vC`|VL?jnr2R5aHpYn33+(JkVI(O3Vxav7^LpNe<8Au%#yeDzIEDuDuXK zU%wLy2r$=v>bzZd8P$|0kE=@YHcAJ}{r@W|cAA)f{9ltJC-ljyjJEf43gHM$v+w`wG!Z=^_OIUW zecMj@F``CPUcKwzz@0Yk^AFeO3MLjfn%CYaobSV|k(dwVc9|{2qS3?L&vsKdNbMoeV>1b5v7wpEg9(Y{Yha6Kbj-J^+$~5R`Wz zMsoRT6|0XjISy1fcMS~7O0|jbNUfmjtM|cp!G(Vv-{Wk|M)6zkMY;?GhBn2j5EL8D zNS`4Ij4t9;MH+9{7(XRmnAj%J4@+OwDV$fuqXi9>M+yypBpnCA<%>T#XFrX{0*%>kS%>-hz^7hHQVYiYayNMII%J+_(91`GNs}8^_Zf zq}Dde7M$1VG>&Cxp79(v9|j3zH{f??R1e!xqte-x-=dKRkN@4zz4KR5Q!-a#HXx~H z1Loq~K^?GvLQrA{JRhOIp{DCO)BnK*M%65U0rM_h*u%tX7&jU7SJ~z`TE)EXA-^b#! 
zv0xH-R2T%V{_leOzlr!i4vXr&W$%aCPb&&TLB`j}lE_tH7P0;7D*7Liw41_L_YM5wI7!{~hU9w;l?> z3c$h)YT5LyEVv(1o#8cAzdjJv^D2Idf<^=^I`*3Gm4mO!X)bRmJ}>~P$GzH7!@H$| zu9JIoAV!|^+6!dD>Sx6?cIow2iFvc~VlrJ9nGBU?$HDCJI7#L`KtEtOK@i}_EXBBo z1$J0ruP}nkmas5|!Ps;a17?&w69@DXv_DP);LRiGA0mMh}SNhQ@ z{w<4h5e178@B|&;`iLDik3`S=cL3goAhNaK{{f%Nha^{_z|;uc0q`&oo0wA)tkzP~;V7%V@5}MvoH5KwbeDc#7<>{3esfA+n7rYkRw{4S zgo0ua5^-B;bRL3W{((gUP>d?mXka_uvMLljYCSaIlpht`+r9$a|L$K2pS3y2a8BJ^Ag zPL?PFQ%}toxT?STr+@^4TnnvJ0y~cT0q~Dw`!`>x2L#Cc!?f3XHfN+3x#KMmU@;i% ziLqS)UD!Y=F-L0efy1Y|`&wdd6UAf@PwP9*%hD;#+7FJM!8{1ESs z*Z=?uT_Nr`1~_m14^WI;+Uh(|Sq-nzwY5L=JY`x_4;kd= zV!98;&Q#7%^i{%rZeE>o_)C>Sy{KJ_T)$osVF%T@q$hc-w}wHGo58fZ+4alh5EC$} z2#FD;-_I`wt1C|pj1;XqD*V{DAe-|G?8Q$NF;lL^`AJ|o?pSjXh{t^q`33TlDJ-Ol zN+ZNSXe6xHl)F;xiK&Tm7{j;o<=^2%XP{^sF8|OSCEtj7Cw%2D{}>57-!D<11ls^p z0Gu=wqm0%rYDeprfa%1W+`lVJl`{V~n3wg?hC)N)Tzp7;OTug7BMl5C$Vg$Y@fc|I zX!wt*1{8f7JsLlRER@5bsBG_X28{vflFZ;bBz{r*5Ix=}0IA0V&N7*%Qm$x#fKYQo z0w9>9u@a7Ue32ziv4()iU)nSfpo_3_8ox&de5Fgr7rK-nmOStb@Bh>>iq+^^R@dbv z?3xGT{4XUWy!U@<6BZIn>(S7#fWJ*e-&3&wrQ)}{zMrzHWEeOd#zQPvl;hAi34ac^ zXF|wm{^eKF!Ej*0qJczK9(ADE`O_KC0B`|?ip}5q6DfoKYwRZ8MtQ#%z+-kj|3tge z*McR8-x6UEen=mZM+_g577iLYhu%KWprywk@*&H`a`>Q;)^ddwO}nr6?HH0arh3c* zP|vw<-o5y}GvKYeyWM+N;^UV|@4rz3jwGEG{PPIV%a{k^ht?tS9wVU`QEMx?+Zfmx zS!c%y7cG?tXl|h zBK__94SMjC)MnfYEU(c2$b!p+b*>1P1QQRehNhSS!+0zSqnj!Y8Rze5WP-R3NsVs~ zUU3`_{xb+JL8yQ@Qbo@efa1Il=@D!JWs(ojkpG+@N!=}&9`2lh?(vYtOaX}Q0}a~n z9VVpK4x&P_(MS*1i^GU)&{)M-uv_91{YPrWhzLAa1bMu!4P`hXJ&^oHX!CF1epj_S zQ6x%10w2{2Kp(jnKqI5DVxZZ@L5hRo6@!}}|H8cFx8g7~{IuWW`NF%24=)sDp)%#H z1gS&p1fc+@7A$TxLz5rg2kU?iy|?3@%;(9z&(r1T2|__gBm`oC8wEi?RG*u^Hn*hT zlZ9d3OS(z*uD;*8A%YDa`ir!p;{4^UdEyAFtA8-ip>64%pH*3*ZfBvw6|Hi#$prf$oX4)yk)j4ai#A~F!hC5okUA0My|X752ZqOesJy=as3N1-i#Qa^Q4|j{}7rB{#;0n70LiDlQLjgbw05XbG z|BCU$_lE+!CUFgW`f)?Yb>Cl_BVKwhW+BfD0s!EH2;bj!1prmTXdq^MZ&7`wUBs`c zx^@qxGh;gySz^L%HTI>}`7o&k=;h9qH4BE!Ph(K_nwGp$ygw!sc6#_Fu*Xa;tf{zA zNKhtm1L07VSolCPp95dZ(m#Cv5WcCDt611cOO7B=hGm}lalX-FNX1ujI-h0GQQnF` z+1DfJ_bm>1c4gr`^8Or33YnKJ^eANm_PvM*z-%D$zlac{O&>?h}LRB!TrIqU9i!<05ooVEavQO+$eLjnLK*=79 zhoFU{e#L-EZ{tT3VquM9KuKr}`2K(K#O;QM#n%|RJ0xiA8wa7Dha{%O@&BQMvDRMx zzXuCr4QrO!`=HK9A6}J|oCU=p4PPwTB^sQW$H}y8lN?<3x;l@#(npJ>6e!yZ>K% z(2(PSZ)UrEla?AxkfPQ$nX=-j%iIoqGh;qWJ2(Mbc!D;f7sw(Q%YHxaQbJ_kz+s6I z53<+Ce3_QA+`;;MX2#G>w6fl>7TjNTd*6VE2xcJ*MPeB2Rm<<2ZEH99D>xXmGZjF8 zd();aj?kuo>&vDOK2x6Ss z^;nKn2!@Cn7sW>aq~bMwLC`+BQ}Mgiy+g(1RJoxq5%2^(rEiv(=r9dlZe8ylVXJ53 zdE+e&rO(*=eJsNh06@n{$2IWbmY?8|0@vL_cKlGq!1TMD{}96ncmklSqXaAhV*miP z7Fcq5Etvz3N(T)!q4Y1}tX0e4BO#a_SRG@{EYj{Oc_8;1IMRByfCJwP^Y8@VcPckuHLxF-@mHE7|Mo7|#-h^P2N5##Vjq^9jsRRB4uj=0 z`uK77RabxNEzR)2F&ze8{oNAPgSaP(#DII&-?RT#Fd$(CAkb>eC}FL*P&EIWn|j~=sbx2eBS?Q7Oucu+ z$SJD1X|rc>fe?T|_#JQF4<1J&PNHbR)Ss{Jne>B`9#AtAqouz5?k2 zczf0C|8AVjzl<2o%joN@zxWXtBu$Cpm!?&L_yI?PgermGF)fO##VAVe|xnfPC(N)7#2c zv5KHhj27r%tr5+@?|6hUb+by8!$ngfEnOawXfSnVOcw#t(%9)_4)?k56i}yp+AL=k zQHzm@*1rM6XBp=(4i>KzatkKdbbtf>5H0~(DhyZQd(Kal=lXPS&(s!%!rfnk-Da zWg~0+YXVPbuENUv__3Px<2%MsV1_TbSFyGQF1d$EnDn9|yB|xurw_b7ku+*Ug+H@% zCcozYRW%9df0$oY)liNospWYDCF4~rqW`(^E2^rqn$n^JO-L-?4P(Cx zUI}{Nyw&yd-I{;zT)qysElTnbEx4>#a3ES=qboxGwa1|Ewbpj#zBg6=RnCZ=5=G{G z-~KLQMkIPcJs_BoiCM`6Ul+$qJ9%(RN`qWp%=9qx(}&yXq^cjq_H6cS{1j{%sJZjo zH~2?d=OWIHgP)8xQz6xrbv}e@+ntIqYtihU{}Oz1?|ACJ_>4@wmZ#5Qa_zE)IY%wL!lc8zA_#{AN zq*;)XmV;t7Eecv95Mny94=V)$k&%g*P{|-D|4(rJd^}#hI4z3?EFO-MUxXD4>{rLv zyxhHOg#!MA(_o8j|3y3!eg6Vx&iwo7(mQZFBi)`z_`0So`Flt_|hU zF_=`#ufL2DW3huBF`l5822x5HC}ods4N8BE6W;s=GWk1j8>oWJL@&ldagFWZM)zU( z`l*clM&nI1NuIp5s)Wr^C0_~{*}uXzvA-?Vs{A;< 
zB5pUq^y5R@?D{iM9;$Zy{Nd1eNx+8Q6;FMK!Y|l#BY2LE-vv^L4L1orCXI<*WzQY5 zrm{2Q{ZPRLDNW6&Yrp{tTE%J;DdMo`g-23-<`*^w|^8s8&%u;mp_PiE35de<4g>>Ld@waxE(?5 zU+3W6jSaVu<2in7-&Y0&fd4C2-0qjP2v;c|`%@nO56RAd*Q-13ZL%HyH28dFvGmR^ z>6|Udt|CRk)QlLykcf)z48RGn31f@v}QY3yC^rc&y z{9JyPPdbMgrL0<+X+8Z01#qH(7+?3do~2%D>SF=?zYMTrrcflZM$xi^Lz5-+E-PZ% z=l?hO!xur?z)_y@f_q-@4W>bgx4TfbthtJa{XL&FmLt>uE|fLB zoU#NSOWSVGPH**4iC>FnO>na5GizJ_%p$M9rh+J%^6yC>2(e=_x_*tG85(Y}JV2fx z_5PZu{Eqer7h3UB|UmXcV4@QDQJR=|z>5+uO^gwuPz`TwE7*n)~s zZmSCa(};dFqWmcP+0Flt*jItiG&Gihfz|pH%1i%^JwG_UfuMM>WhFNs$LK51LJ`Ar z>bb8I$A(SY<~Snl>xvi=-_?7|SQr80L9xU+vc$qqR$kRZ$BW;A4iO+)q;RdI>JgPTVb5*Skw8a4u~%6|&i z^%Ol1oa5ZI-Q!t4?a(kp5;!42uziDI{`m?xA`Y?$s612{s6PS+puWV&_yeIZh;ccg z7=iWgB=TG-^);}+|3WT8TM0$ru)et2J&zt87hb+!j3~kn1R(%fYxYckwlssm;O*(Z zzb)XL2|!mK1as$OZ>eM}BSe|h#5`^PJ6G-c1w#@vjy5ru$qubpJ7rR=ggML~(CdQ7 zvafqE3|Zyx?(Mr> zjIBiS_Az#0cP?9}!XTaRvFPr?VAw7!Vr)}vTvqz-CF!32UczA!2r@uoBw5BU29KN9 z!Np))Szh;udmOiwZbyq1VzG4QANYL)$Ahsbsk2L?5C8Z|4#bCAuj8!~fB(W#5Ny4l z(2F)`(X(ca9tJWS!JrA2TgpI87z7aju;8E~AXgGMzs2d{5rCBJyS~F`Bfay+UhlnZlp^GirT1{kLxEcHpytRZd zSa+180x$+5$ZOc~1Ju4~3$XxCAmH{5(5+>dxTjMT!w`n;0hX~^N6VL3{etnu4JKri zzd_U$5fC%)zSm{#5F{H2LBS<9-_Mlg`N=5OQH)k7+|;kmSjk)#-+6BwQSoAPkWqzm z89y&LXz@$&6N@GiGHLS=LEE#`Sje&9IR7j-Z@Y$bp$Ay3Lt@R36p{EuJ z^#@iTpgzYK@9}+ARe6L6b=TqtiM{u~i{E|rO^hojuX`<5F27O%84HJ*WzAEl3mHww zxvTE`Ez5Tvwyu~)H+O68eGlf)I6ftfi`k%PA9@4yY+ix8l#cS=Ndpm74vG|2(gD>} zKB6pxtdL!~?zd`7V=B(9pI9NzEIjK$?6>)@}i zcisJ$t@Q-K0ZLfKOaUJ|{%8-NbzE|Nc*y#m%eq6ihD3ys3r{Wt>@}BR6 zFOTKADw<6t2#E-Do(U%!;~5@?Epm-}ogefrfuMe+4BY|X!uO+t&YiYB3#IvTl(7pJ z7z;g<6P73@IPHbJx!|#sG6RX3_weK6MygT93I&8;uKW1{E{pVS=z0QDG$J5`pg(B0 zqu~A|&>RQm!(y3*1no=5o(4H*eoWH@(1K3*Oa{(8-%^+3NX_93E+M9_pEDD5#7!+B zSUdlyuZ>()ltJ+B<}&yMC=cKqFjOA{@PIu8J$SRn;@tq60Tkn+^Yyk|yPiBomcEA= z2jJcj4VM2sU#(~2k{G@dyE8!kPs~=mQIJgN_g~~!^AIH3?EJgEr7Ac(FQaFi7gP^_l{7jy2x@}P;=l)alYWw%VrYsieYeab6J zmE1PE^Q&GDL1o_j903zG-@`J0I<75zB5McH=+XW>j34YB1NSlL{~ypj^bgssn41fj z;bAnnUu$34Y9B0WnS*)n3bcAYD|h`q@SHf~W4ohA;AMWwzjH(O{>yM|(yGdlNmw3>5>QgXlik0>}4geKXge z|8StjC3^ETJi*jM$E-y0W3|n#a)jI z0|EQqhtu*(YW_pN@K14Cs;Y?Rwa`L`&-dofBI;;EnikXHOC0OR6_6htS&JIq>s|^0 zK>bI|@xddHz4EGpM+EBz#25fVRyqyH)Of&hroDfko$9PyhRb(=^#d>ra55r*$O8F9 zg2Nn_!?|KLQ`I-?UuGVvpF|8Wa0N6t6BeHy)%BOW2xqfrQkJ#;#TCFom?O9NI09_I zI0*`9y#eIWS!#g)lN1;NkZ=pe1peR&dAO4I;s70_ES?R-TOsV?TP8IeDmwnSuwJ5J z^T4Vacla1X0($p+AIs{d43lH=W)hd}=m?;1vGv&Y(C*(S>Gl8s4Y?ulLz zw8aAhqgjA5Hu*2-e%-Nh1W3taSgP}}nHRI0{ZvWTekvRH_|_$)_&OX4h5{kLnVSXj z+0k3_8y66tKgjt=5kclBI;ZRALT}Y`f2N@t5EfSXvWBbwzg^0bxpH=1Ap91bm@2OK z2f=`grT5~0{Z=LT56OH4BlR9wnxB`j$aFq(j};bSU5r+jl~fk$91j%3+$gHpR0Ebx zT(WfN_;)RJ!)UG8!lJs3FB>LLzF$;|P>?(>&)9&34)d2z>0e{{Z|pBIN2spzq@k*1 z$fs|c!G{0Ud|##Bfll*(Cb7*sMko{y#$N@EygazQN53 z6cf;)7=-*gS>R|=OG7BS^@K^O#|1L=&T-@AW^}ybfgG{6q_XrE2jfgI9bxcd5J9C) zUVNIK1H6ZHh!$oiL(H%J9c8-+!f=ZPx=4M->-hr-{&rxiCO%7u!$+e>#CrfkaXh!; zTtUPfP((n;I~la<4cUfTQB@-3tvF(klCoPXV+iU4s)ZFObf|Kb6q5QJi3=L9BzW6P z_rEK7P&9j(92j`OfV`po|dL@dYs5fnVzc^GR7D?=Mku0oIvK|DxG_6 z^64O5g~0%T;9Z0T1RgO0fb1rB?b+@iUTQv&tHb=urRWh0E_5!oH$#c42oq@4e6@mj zCW2++DFcH@;Os>&2(j{MS^?edeerleD1KoAy$GOa9)XmCu|z3|%9r1DLNsNgX>!rD zm-6v(_L|E{1FS!9D^?}Q7mbC53ZWrIqsfqc@eeFr-Pw?U-4`R|c0)vwurQfdT+h)u z&T+o%zLd$y@I(}=vYzR__c7_Cxs4!#7J?UOkhcKs&RENdM4z}(S2%kb4+aO12ra&* zwGFD^kDX61XUc{=_IpXS+5Vk2bkaJj4tfNoF(KBcB|*56iBNPLUdglQ0|5L+l*9l2 z66_zNmgqUbL|hg$9veoC9C8Ta$SC8BAcKxU>jpGH4$jn~_2|FTm%!gRJ^~PdK)4kX z6$S#31E!*;57ut6Gb_BKwo^w%4C<8{FSM1TWW?nc!J1u|qLe%9ecO+GcFWHeFQ9a~^jae&C* zMhwO3niUa6N=?Rg41epH#l~?sJREczhL9u*fUL9~=t((l$HB$l-a!X!A+;u2w+?Yr 
From aba93a3eac209c61d85cf0caff0d83c27b03aacc Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 03:18:08 -0400 Subject: [PATCH 308/329] update --- rl2/a3c/main.py | 8 ++++++++ rl2/atari/dqn_tf.py | 11 ++++++++++- rl2/atari/dqn_theano.py | 6 +++++- rl2/cartpole/dqn_tf.py | 7 +++++++ rl2/cartpole/dqn_theano.py | 4 ++++ rl2/cartpole/pg_tf.py | 7 +++++++ rl2/cartpole/pg_theano.py | 4 ++++ rl2/cartpole/q_learning.py | 4 ++++ rl2/cartpole/q_learning_bins.py | 4 ++++ rl2/cartpole/random_search.py | 4 ++++ rl2/cartpole/save_a_video.py | 6
+++++- rl2/cartpole/td_lambda.py | 5 +++++ rl2/cartpole/tf_warmup.py | 3 +++ rl2/gym_tutorial.py | 5 +++++ rl2/mountaincar/n_step.py | 4 ++++ rl2/mountaincar/pg_tf.py | 15 +++++++++++++-- rl2/mountaincar/pg_tf_random.py | 7 +++++++ rl2/mountaincar/pg_theano.py | 13 ++++++++----- rl2/mountaincar/pg_theano_random.py | 4 ++++ rl2/mountaincar/q_learning.py | 20 ++++++++++++++------ rl2/mountaincar/td_lambda.py | 10 +++++++--- 21 files changed, 132 insertions(+), 19 deletions(-) mode change 100644 => 100755 rl2/atari/dqn_tf.py mode change 100644 => 100755 rl2/atari/dqn_theano.py mode change 100644 => 100755 rl2/mountaincar/pg_tf.py mode change 100644 => 100755 rl2/mountaincar/pg_theano.py mode change 100644 => 100755 rl2/mountaincar/q_learning.py mode change 100644 => 100755 rl2/mountaincar/td_lambda.py diff --git a/rl2/a3c/main.py b/rl2/a3c/main.py index 0e7f88bf..a902ff03 100644 --- a/rl2/a3c/main.py +++ b/rl2/a3c/main.py @@ -13,6 +13,14 @@ from worker import Worker +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + + ENV_NAME = "Breakout-v0" MAX_GLOBAL_STEPS = 5e6 STEPS_PER_UPDATE = 5 diff --git a/rl2/atari/dqn_tf.py b/rl2/atari/dqn_tf.py old mode 100644 new mode 100755 index 34c1ab16..e37394d1 --- a/rl2/atari/dqn_tf.py +++ b/rl2/atari/dqn_tf.py @@ -19,7 +19,12 @@ +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") ##### testing only # MAX_EXPERIENCES = 10000 @@ -141,7 +146,11 @@ def get_minibatch(self): self.states[i] = self._get_state(idx - 1) self.new_states[i] = self._get_state(idx) - return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices] + return np.transpose(self.states, axes=(0, 2, 3, 1)), \ + self.actions[self.indices], \ + self.rewards[self.indices], \ + np.transpose(self.new_states, axes=(0, 2, 3, 1)), \ + self.terminal_flags[self.indices] class DQN: diff --git a/rl2/atari/dqn_theano.py b/rl2/atari/dqn_theano.py old mode 100644 new mode 100755 index e0114b59..0ad3b36a --- a/rl2/atari/dqn_theano.py +++ b/rl2/atari/dqn_theano.py @@ -140,7 +140,11 @@ def get_minibatch(self): self.states[i] = self._get_state(idx - 1) self.new_states[i] = self._get_state(idx) - return self.states, self.actions[self.indices], self.rewards[self.indices], self.new_states, self.terminal_flags[self.indices] + return self.states, \ + self.actions[self.indices], \ + self.rewards[self.indices], \ + self.new_states, \ + self.terminal_flags[self.indices] def init_filter(shape): diff --git a/rl2/cartpole/dqn_tf.py b/rl2/cartpole/dqn_tf.py index e397acd6..133772df 100644 --- a/rl2/cartpole/dqn_tf.py +++ b/rl2/cartpole/dqn_tf.py @@ -15,6 +15,13 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # global counter global_iters = 0 diff --git a/rl2/cartpole/dqn_theano.py b/rl2/cartpole/dqn_theano.py index 08dd2ded..18e6844c 100644 --- a/rl2/cartpole/dqn_theano.py +++ b/rl2/cartpole/dqn_theano.py @@ -16,6 +16,10 @@ from 
datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # global counter global_iters = 0 diff --git a/rl2/cartpole/pg_tf.py b/rl2/cartpole/pg_tf.py index d5021eb7..40122df0 100644 --- a/rl2/cartpole/pg_tf.py +++ b/rl2/cartpole/pg_tf.py @@ -16,6 +16,13 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # so you can test different architectures class HiddenLayer: diff --git a/rl2/cartpole/pg_theano.py b/rl2/cartpole/pg_theano.py index 99ac7aec..16979d5f 100644 --- a/rl2/cartpole/pg_theano.py +++ b/rl2/cartpole/pg_theano.py @@ -17,6 +17,10 @@ from datetime import datetime from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # so you can test different architectures class HiddenLayer: diff --git a/rl2/cartpole/q_learning.py b/rl2/cartpole/q_learning.py index d02fbc05..3e7cc4a5 100644 --- a/rl2/cartpole/q_learning.py +++ b/rl2/cartpole/q_learning.py @@ -20,6 +20,10 @@ from sklearn.kernel_approximation import RBFSampler from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + class SGDRegressor: def __init__(self, D): diff --git a/rl2/cartpole/q_learning_bins.py b/rl2/cartpole/q_learning_bins.py index 3d3ed041..198ceb2a 100644 --- a/rl2/cartpole/q_learning_bins.py +++ b/rl2/cartpole/q_learning_bins.py @@ -15,6 +15,10 @@ from gym import wrappers from datetime import datetime +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # turns list of integers into an int # Ex. 
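The hunks in this patch keep adding the same runtime guard near the top of each script. As a rough standalone sketch of what that guard does (the helper name check_legacy_versions is purely illustrative; its body just restates the lines added in these hunks), it can be read as:

import gym
import tensorflow as tf

def check_legacy_versions():
    # These scripts rely on the older Gym interface (env.reset() returning only the
    # observation, env.step() returning a 4-tuple) and on the Tensorflow 1.x
    # graph/session API, so newer releases are rejected up front.
    gym_minor_version = int(gym.__version__.split('.')[1])
    if gym_minor_version >= 19:
        # note: as written, this also exits on Gym 0.19.x itself (its minor version
        # number is 19), even though the message says "0.19.0 or earlier"
        exit("Please install OpenAI Gym 0.19.0 or earlier")
    if tf.__version__.startswith('2'):
        exit("Please install Tensorflow 1.x")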
diff --git a/rl2/cartpole/random_search.py b/rl2/cartpole/random_search.py index 77ea36d6..985bcfda 100644 --- a/rl2/cartpole/random_search.py +++ b/rl2/cartpole/random_search.py @@ -9,6 +9,10 @@ import numpy as np import matplotlib.pyplot as plt +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + def get_action(s, w): return 1 if s.dot(w) > 0 else 0 diff --git a/rl2/cartpole/save_a_video.py b/rl2/cartpole/save_a_video.py index ed34c76d..e7128fc9 100644 --- a/rl2/cartpole/save_a_video.py +++ b/rl2/cartpole/save_a_video.py @@ -10,6 +10,11 @@ import numpy as np import matplotlib.pyplot as plt +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + def get_action(s, w): return 1 if s.dot(w) > 0 else 0 @@ -63,6 +68,5 @@ def random_search(env): plt.show() # play a final set of episodes - # env = wrappers.Monitor(env, 'my_awesome_dir') env = wrappers.RecordVideo(env, 'my_awesome_dir') print("***Final run with final weights***:", play_one_episode(env, params)) diff --git a/rl2/cartpole/td_lambda.py b/rl2/cartpole/td_lambda.py index ff19f627..ba9883bc 100644 --- a/rl2/cartpole/td_lambda.py +++ b/rl2/cartpole/td_lambda.py @@ -15,6 +15,11 @@ from q_learning import FeatureTransformer from q_learning_bins import plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + class SGDRegressor: def __init__(self, D): diff --git a/rl2/cartpole/tf_warmup.py b/rl2/cartpole/tf_warmup.py index 877cd54a..1cc2efee 100644 --- a/rl2/cartpole/tf_warmup.py +++ b/rl2/cartpole/tf_warmup.py @@ -7,6 +7,9 @@ import tensorflow as tf import q_learning +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + class SGDRegressor: def __init__(self, D): diff --git a/rl2/gym_tutorial.py b/rl2/gym_tutorial.py index 7a2d7dbb..ace01452 100644 --- a/rl2/gym_tutorial.py +++ b/rl2/gym_tutorial.py @@ -6,6 +6,11 @@ # Environment page: # https://gym.openai.com/envs/CartPole-v0 +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + # get the environment env = gym.make('CartPole-v0') diff --git a/rl2/mountaincar/n_step.py b/rl2/mountaincar/n_step.py index 5ef967ff..9fe0dd94 100644 --- a/rl2/mountaincar/n_step.py +++ b/rl2/mountaincar/n_step.py @@ -24,6 +24,10 @@ import q_learning from q_learning import plot_cost_to_go, FeatureTransformer, Model, plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + class SGDRegressor: def __init__(self, **kwargs): diff --git a/rl2/mountaincar/pg_tf.py b/rl2/mountaincar/pg_tf.py old mode 100644 new mode 100755 index b8c8ef59..fe04b416 --- a/rl2/mountaincar/pg_tf.py +++ b/rl2/mountaincar/pg_tf.py @@ -15,6 +15,13 @@ from datetime import datetime from q_learning import plot_running_avg, FeatureTransformer, plot_cost_to_go +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # so you can test different architectures class HiddenLayer: @@ -177,8 +184,12 @@ def play_one_td(env, pmodel, vmodel, gamma): totalreward += reward # update the models - V_next = 
vmodel.predict(observation) - G = reward + gamma*V_next + if done: + G = reward + else: + V_next = vmodel.predict(observation) + G = reward + gamma*V_next + advantage = G - vmodel.predict(prev_observation) pmodel.partial_fit(prev_observation, action, advantage) vmodel.partial_fit(prev_observation, G) diff --git a/rl2/mountaincar/pg_tf_random.py b/rl2/mountaincar/pg_tf_random.py index bb0d2a11..e46b7b25 100644 --- a/rl2/mountaincar/pg_tf_random.py +++ b/rl2/mountaincar/pg_tf_random.py @@ -15,6 +15,13 @@ from datetime import datetime from q_learning import plot_running_avg, FeatureTransformer +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + # so you can test different architectures class HiddenLayer: diff --git a/rl2/mountaincar/pg_theano.py b/rl2/mountaincar/pg_theano.py old mode 100644 new mode 100755 index cf1c8f01..669fc416 --- a/rl2/mountaincar/pg_theano.py +++ b/rl2/mountaincar/pg_theano.py @@ -208,7 +208,7 @@ def predict(self, X): return self.predict_op(X) -def play_one_td(env, pmodel, vmodel, gamma, train=True): +def play_one_td(env, pmodel, vmodel, gamma): observation = env.reset() done = False totalreward = 0 @@ -224,12 +224,15 @@ def play_one_td(env, pmodel, vmodel, gamma, train=True): totalreward += reward # update the models - if train: + if done: + G = reward + else: V_next = vmodel.predict(observation) G = reward + gamma*V_next - advantage = G - vmodel.predict(prev_observation) - pmodel.partial_fit(prev_observation, action, advantage) - vmodel.partial_fit(prev_observation, G) + + advantage = G - vmodel.predict(prev_observation) + pmodel.partial_fit(prev_observation, action, advantage) + vmodel.partial_fit(prev_observation, G) iters += 1 diff --git a/rl2/mountaincar/pg_theano_random.py b/rl2/mountaincar/pg_theano_random.py index 9ac07b16..c95c5971 100644 --- a/rl2/mountaincar/pg_theano_random.py +++ b/rl2/mountaincar/pg_theano_random.py @@ -16,6 +16,10 @@ from datetime import datetime from q_learning import plot_running_avg, FeatureTransformer +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # so you can test different architectures diff --git a/rl2/mountaincar/q_learning.py b/rl2/mountaincar/q_learning.py old mode 100644 new mode 100755 index 1d4be4f2..295d72aa --- a/rl2/mountaincar/q_learning.py +++ b/rl2/mountaincar/q_learning.py @@ -27,6 +27,10 @@ from sklearn.kernel_approximation import RBFSampler from sklearn.linear_model import SGDRegressor +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + # SGDRegressor defaults: # loss='squared_loss', penalty='l2', alpha=0.0001, @@ -109,9 +113,13 @@ def play_one(model, env, eps, gamma): observation, reward, done, info = env.step(action) # update the model - next = model.predict(observation) - # assert(next.shape == (1, env.action_space.n)) - G = reward + gamma*np.max(next[0]) + if done: + G = reward + else: + Qnext = model.predict(observation) + # assert(next.shape == (1, env.action_space.n)) + G = reward + gamma*np.max(Qnext[0]) + model.update(prev_observation, action, G) totalreward += reward @@ -165,14 +173,14 @@ def main(show_plots=True): N = 300 totalrewards = np.empty(N) for n in range(N): - # eps = 1.0/(0.1*n+1) - eps = 0.1*(0.97**n) + eps = 1.0/(0.1*n+1) + # eps = 0.1*(0.97**n) if 
n == 199: print("eps:", eps) # eps = 1.0/np.sqrt(n+1) totalreward = play_one(model, env, eps, gamma) totalrewards[n] = totalreward - if (n + 1) % 100 == 0: + if (n + 1) % 10 == 0: print("episode:", n, "total reward:", totalreward) print("avg reward for last 100 episodes:", totalrewards[-100:].mean()) print("total steps:", -totalrewards.sum()) diff --git a/rl2/mountaincar/td_lambda.py b/rl2/mountaincar/td_lambda.py old mode 100644 new mode 100755 index 4d4f292d..3d7dd8ac --- a/rl2/mountaincar/td_lambda.py +++ b/rl2/mountaincar/td_lambda.py @@ -23,6 +23,10 @@ # code we already wrote from q_learning import plot_cost_to_go, FeatureTransformer, plot_running_avg +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + class BaseModel: def __init__(self, D): @@ -83,9 +87,9 @@ def play_one(model, env, eps, gamma, lambda_): observation, reward, done, info = env.step(action) # update the model - next = model.predict(observation) - assert(next.shape == (1, env.action_space.n)) - G = reward + gamma*np.max(next[0]) + Qnext = model.predict(observation) + assert(Qnext.shape == (1, env.action_space.n)) + G = reward + gamma*np.max(Qnext[0]) model.update(prev_observation, action, G, gamma, lambda_) totalreward += reward From df43693a36e127c7ad5027a4a1a9551fa11e39c7 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 03:21:00 -0400 Subject: [PATCH 309/329] update --- cnn_class/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- cnn_class2/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- rnn_class/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- tf2.0/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- timeseries/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- transformers/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- 6 files changed, 24 insertions(+), 12 deletions(-) diff --git a/cnn_class/WHERE ARE THE NOTEBOOKS.txt b/cnn_class/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/cnn_class/WHERE ARE THE NOTEBOOKS.txt +++ b/cnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/cnn_class2/WHERE ARE THE NOTEBOOKS.txt b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/cnn_class2/WHERE ARE THE NOTEBOOKS.txt +++ b/cnn_class2/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
\ No newline at end of file diff --git a/rnn_class/WHERE ARE THE NOTEBOOKS.txt b/rnn_class/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..5446ce25 100644 --- a/rnn_class/WHERE ARE THE NOTEBOOKS.txt +++ b/rnn_class/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/tf2.0/WHERE ARE THE NOTEBOOKS.txt b/tf2.0/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/tf2.0/WHERE ARE THE NOTEBOOKS.txt +++ b/tf2.0/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/timeseries/WHERE ARE THE NOTEBOOKS.txt b/timeseries/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/timeseries/WHERE ARE THE NOTEBOOKS.txt +++ b/timeseries/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/transformers/WHERE ARE THE NOTEBOOKS.txt b/transformers/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/transformers/WHERE ARE THE NOTEBOOKS.txt +++ b/transformers/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
\ No newline at end of file From 165a089d92bfdd241dbacd3415cb07b29e02dead Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 03:22:44 -0400 Subject: [PATCH 310/329] update --- pytorch/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pytorch/WHERE ARE THE NOTEBOOKS.txt b/pytorch/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/pytorch/WHERE ARE THE NOTEBOOKS.txt +++ b/pytorch/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file From ed29adcd549fe982bc5ab5cc4c57aa46639d1da8 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 03:24:14 -0400 Subject: [PATCH 311/329] update --- calculus/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- linear_algebra/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- naive_bayes/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- nlp_v2/WHERE ARE THE NOTEBOOKS.txt | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/calculus/WHERE ARE THE NOTEBOOKS.txt b/calculus/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/calculus/WHERE ARE THE NOTEBOOKS.txt +++ b/calculus/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt b/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt index 8d29101d..5446ce25 100644 --- a/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt +++ b/chatgpt_trading/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the code" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/linear_algebra/WHERE ARE THE NOTEBOOKS.txt b/linear_algebra/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/linear_algebra/WHERE ARE THE NOTEBOOKS.txt +++ b/linear_algebra/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. 
\ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/naive_bayes/WHERE ARE THE NOTEBOOKS.txt b/naive_bayes/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/naive_bayes/WHERE ARE THE NOTEBOOKS.txt +++ b/naive_bayes/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file diff --git a/nlp_v2/WHERE ARE THE NOTEBOOKS.txt b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt index 4b0a3f50..5446ce25 100644 --- a/nlp_v2/WHERE ARE THE NOTEBOOKS.txt +++ b/nlp_v2/WHERE ARE THE NOTEBOOKS.txt @@ -1,3 +1,5 @@ -If you're here, this means you haven't watched the "where to get the notebooks" lecture very carefully! +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. -Please watch it again, and follow the instructions. \ No newline at end of file +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. \ No newline at end of file From da2548ac1f0c7aaffec80f6be929ff1c82a0ab55 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 03:27:26 -0400 Subject: [PATCH 312/329] update --- rl3/a2c/a2c.py | 3 + rl3/a2c/main.py | 4 + rl3/ddpg.py | 7 + rl3/es_mujoco.py | 5 + rl3/gym_review.py | 5 + rl3/td3.py | 343 ++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 367 insertions(+) create mode 100755 rl3/td3.py diff --git a/rl3/a2c/a2c.py b/rl3/a2c/a2c.py index 3b7d3268..ce1667b1 100644 --- a/rl3/a2c/a2c.py +++ b/rl3/a2c/a2c.py @@ -5,6 +5,9 @@ import tensorflow as tf import os +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + def set_global_seeds(i): tf.set_random_seed(i) diff --git a/rl3/a2c/main.py b/rl3/a2c/main.py index 3bf85105..b42c86d9 100644 --- a/rl3/a2c/main.py +++ b/rl3/a2c/main.py @@ -11,6 +11,10 @@ import argparse import logging +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Mute missing instructions errors MODEL_PATH = 'models' diff --git a/rl3/ddpg.py b/rl3/ddpg.py index 3eb80d1c..3913cedd 100644 --- a/rl3/ddpg.py +++ b/rl3/ddpg.py @@ -5,6 +5,13 @@ import matplotlib.pyplot as plt from datetime import datetime +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + ### avoid crashing on Mac # doesn't seem to work diff --git a/rl3/es_mujoco.py b/rl3/es_mujoco.py index ce43f983..3ef4ffd9 100644 --- a/rl3/es_mujoco.py +++ b/rl3/es_mujoco.py @@ -10,6 +10,11 @@ import gym import sys +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please 
install OpenAI Gym 0.19.0 or earlier") + + # environment ENV_NAME = 'HalfCheetah-v2' diff --git a/rl3/gym_review.py b/rl3/gym_review.py index 26733a58..3be2ac98 100644 --- a/rl3/gym_review.py +++ b/rl3/gym_review.py @@ -3,6 +3,11 @@ import numpy as np import matplotlib.pyplot as plt +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + + def get_action(s, w): return 1 if s.dot(w) > 0 else 0 diff --git a/rl3/td3.py b/rl3/td3.py new file mode 100755 index 00000000..fbec1095 --- /dev/null +++ b/rl3/td3.py @@ -0,0 +1,343 @@ +import numpy as np +import tensorflow as tf +import gym +import matplotlib.pyplot as plt +from datetime import datetime + +gym_minor_version = int(gym.__version__.split('.')[1]) +if gym_minor_version >= 19: + exit("Please install OpenAI Gym 0.19.0 or earlier") + +if tf.__version__.startswith('2'): + exit("Please install Tensorflow 1.x") + + +### avoid crashing on Mac +# doesn't seem to work +from sys import platform as sys_pf +if sys_pf == 'darwin': + import matplotlib + matplotlib.use("TkAgg") + + +# simple feedforward neural net +def ANN(x, layer_sizes, hidden_activation=tf.nn.relu, output_activation=None): + for h in layer_sizes[:-1]: + x = tf.layers.dense(x, units=h, activation=hidden_activation) + return tf.layers.dense(x, units=layer_sizes[-1], activation=output_activation) + + +# get all variables within a scope +def get_vars(scope): + return [x for x in tf.global_variables() if scope in x.name] + + +### Create both the actor and critic networks at once ### +### Q(s, mu(s)) returns the maximum Q for a given state s ### +def CreateNetworks( + s, a, + num_actions, + action_max, + hidden_sizes=(300,), + hidden_activation=tf.nn.relu, + output_activation=tf.tanh): + + with tf.variable_scope('mu'): + mu = action_max * ANN(s, list(hidden_sizes)+[num_actions], hidden_activation, output_activation) + with tf.variable_scope('q1'): + input_ = tf.concat([s, a], axis=-1) # (state, action) + q1 = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) + with tf.variable_scope('q2'): + input_ = tf.concat([s, a], axis=-1) # (state, action) + q2 = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) + with tf.variable_scope('q1', reuse=True): + # reuse is True, so it reuses the weights from the previously defined Q network + input_ = tf.concat([s, mu], axis=-1) # (state, mu(state)) + q1_mu = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) + return mu, q1, q2, q1_mu + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros([size, act_dim], dtype=np.float32) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.float32) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], 
+ d=self.done_buf[idxs]) + + +### Implement the TD3 algorithm ### +def td3( + env_fn, + ac_kwargs=dict(), + seed=0, + save_folder=None, + num_train_episodes=100, + test_agent_every=25, + replay_size=int(1e6), + gamma=0.99, + decay=0.995, + mu_lr=1e-3, + q_lr=1e-3, + batch_size=100, + start_steps=10000, + action_noise=0.1, + target_noise=0.2, + noise_clip=0.5, + policy_delay=2, + max_episode_length=1000): + + tf.set_random_seed(seed) + np.random.seed(seed) + + env, test_env = env_fn(), env_fn() + + # comment out this line if you don't want to record a video of the agent + if save_folder is not None: + test_env = gym.wrappers.Monitor(test_env, save_folder) + + # get size of state space and action space + num_states = env.observation_space.shape[0] + num_actions = env.action_space.shape[0] + + # Maximum value of action + # Assumes both low and high values are the same + # Assumes all actions have the same bounds + # May NOT be the case for all environments + action_max = env.action_space.high[0] + + # Create Tensorflow placeholders (neural network inputs) + X = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # state + A = tf.placeholder(dtype=tf.float32, shape=(None, num_actions)) # action + X2 = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # next state + R = tf.placeholder(dtype=tf.float32, shape=(None,)) # reward + D = tf.placeholder(dtype=tf.float32, shape=(None,)) # done + + # Main network outputs + with tf.variable_scope('main'): + mu, q1, q2, q1_mu = CreateNetworks(X, A, num_actions, action_max, **ac_kwargs) + + # Target networks + # First, get the output policy given next state X2 + with tf.variable_scope('target'): + # Note: "A" placeholder is effectively ignored + # since mu is only a function of state (X2) + mu_targ, _, _, _ = CreateNetworks(X2, A, num_actions, action_max, **ac_kwargs) + + # Next, add noise to mu_targ, before passing it through the target Q-networks + with tf.variable_scope('target', reuse=True): + # Add Gaussian noise and clip to valid action range + epsilon = tf.random_normal(tf.shape(mu_targ), stddev=target_noise) + epsilon = tf.clip_by_value(epsilon, -noise_clip, noise_clip) + A2 = mu_targ + epsilon + A2 = tf.clip_by_value(A2, -action_max, action_max) + + _, q1_targ, q2_targ, _ = CreateNetworks(X2, A2, num_actions, action_max, **ac_kwargs) + + # Experience replay memory + replay_buffer = ReplayBuffer(obs_dim=num_states, act_dim=num_actions, size=replay_size) + + + # Target value for the Q-network loss + # We use stop_gradient to tell Tensorflow not to differentiate + # Take the smaller of Q1 and Q2! 
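+  # Clipped double-Q learning: bootstrapping from the smaller of the two target
+  # critics keeps the target pessimistic and counteracts Q-value overestimation.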
+ min_q_targ = tf.minimum(q1_targ, q2_targ) + q_target = tf.stop_gradient(R + gamma * (1 - D) * min_q_targ) + + # TD3 losses + mu_loss = -tf.reduce_mean(q1_mu) + q1_loss = tf.reduce_mean((q1 - q_target)**2) + q2_loss = tf.reduce_mean((q2 - q_target)**2) + q_loss = q1_loss + q2_loss # minimize simultaneously + + # Train policy and value separately + mu_optimizer = tf.train.AdamOptimizer(learning_rate=mu_lr) + q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr) + mu_train_op = mu_optimizer.minimize(mu_loss, var_list=get_vars('main/mu')) + q_train_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q')) + + # Use soft updates to update the target networks + target_update = tf.group( + [tf.assign(v_targ, decay*v_targ + (1 - decay)*v_main) + for v_main, v_targ in zip(get_vars('main'), get_vars('target')) + ] + ) + + # Copy main network params to target networks + target_init = tf.group( + [tf.assign(v_targ, v_main) + for v_main, v_targ in zip(get_vars('main'), get_vars('target')) + ] + ) + + # boilerplate (and copy to the target networks!) + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + sess.run(target_init) + + def get_action(s, noise_scale): + a = sess.run(mu, feed_dict={X: s.reshape(1,-1)})[0] + a += noise_scale * np.random.randn(num_actions) + return np.clip(a, -action_max, action_max) + + test_returns = [] + def test_agent(num_episodes=5): + t0 = datetime.now() + n_steps = 0 + for j in range(num_episodes): + s, episode_return, episode_length, d = test_env.reset(), 0, 0, False + while not (d or (episode_length == max_episode_length)): + # Take deterministic actions at test time (noise_scale=0) + test_env.render() + s, r, d, _ = test_env.step(get_action(s, 0)) + episode_return += r + episode_length += 1 + n_steps += 1 + print('test return:', episode_return, 'episode_length:', episode_length) + test_returns.append(episode_return) + # print("test steps per sec:", n_steps / (datetime.now() - t0).total_seconds()) + + + # Main loop: play episode and train + returns = [] + q_losses = [] + mu_losses = [] + num_steps = 0 + for i_episode in range(num_train_episodes): + + # reset env + s, episode_return, episode_length, d = env.reset(), 0, 0, False + + while not (d or (episode_length == max_episode_length)): + # For the first `start_steps` steps, use randomly sampled actions + # in order to encourage exploration. 
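+      # After that warm-up, get_action() returns the policy's action plus zero-mean
+      # Gaussian noise of scale action_noise, clipped to the valid action range.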
+ if num_steps > start_steps: + a = get_action(s, action_noise) + else: + a = env.action_space.sample() + + # Keep track of the number of steps done + num_steps += 1 + if num_steps == start_steps: + print("USING AGENT ACTIONS NOW") + + # Step the env + s2, r, d, _ = env.step(a) + episode_return += r + episode_length += 1 + + # Ignore the "done" signal if it comes from hitting the time + # horizon (that is, when it's an artificial terminal signal + # that isn't based on the agent's state) + d_store = False if episode_length == max_episode_length else d + + # Store experience to replay buffer + replay_buffer.store(s, a, r, s2, d_store) + + # Assign next state to be the current state on the next round + s = s2 + + # Perform the updates + for j in range(episode_length): + batch = replay_buffer.sample_batch(batch_size) + feed_dict = { + X: batch['s'], + X2: batch['s2'], + A: batch['a'], + R: batch['r'], + D: batch['d'] + } + + # Q network update + # Note: plot the Q loss if you want + ql, _ = sess.run([q_loss, q_train_op], feed_dict) + q_losses.append(ql) + + # Policy update + # (And target networks update) + # Note: plot the mu loss if you want + if j % policy_delay == 0: + mul, _, _ = sess.run([mu_loss, mu_train_op, target_update], feed_dict) + mu_losses.append(mul) + + print("Episode:", i_episode + 1, "Return:", episode_return, 'episode_length:', episode_length) + returns.append(episode_return) + + # Test the agent + if i_episode > 0 and i_episode % test_agent_every == 0: + test_agent() + + # on Mac, plotting results in an error, so just save the results for later + # if you're not on Mac, feel free to uncomment the below lines + np.savez('td3_results.npz', train=returns, test=test_returns, q_losses=q_losses, mu_losses=mu_losses) + + # plt.plot(returns) + # plt.plot(smooth(np.array(returns))) + # plt.title("Train returns") + # plt.show() + + # plt.plot(test_returns) + # plt.plot(smooth(np.array(test_returns))) + # plt.title("Test returns") + # plt.show() + + # plt.plot(q_losses) + # plt.title('q_losses') + # plt.show() + + # plt.plot(mu_losses) + # plt.title('mu_losses') + # plt.show() + + +def smooth(x): + # last 100 + n = len(x) + y = np.zeros(n) + for i in range(n): + start = max(0, i - 99) + y[i] = float(x[start:(i+1)].sum()) / (i - start + 1) + return y + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + # parser.add_argument('--env', type=str, default='HalfCheetah-v2') + parser.add_argument('--env', type=str, default='Pendulum-v0') + parser.add_argument('--hidden_layer_sizes', type=int, default=300) + parser.add_argument('--num_layers', type=int, default=1) + parser.add_argument('--gamma', type=float, default=0.99) + parser.add_argument('--seed', type=int, default=0) + parser.add_argument('--num_train_episodes', type=int, default=200) + parser.add_argument('--save_folder', type=str, default='td3_monitor') + args = parser.parse_args() + + + td3( + lambda : gym.make(args.env), + ac_kwargs=dict(hidden_sizes=[args.hidden_layer_sizes]*args.num_layers), + gamma=args.gamma, + seed=args.seed, + save_folder=args.save_folder, + num_train_episodes=args.num_train_episodes, + ) From dbec19bbb81b69f072bf0be30dcac90dbbdffc4a Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 03:29:43 -0400 Subject: [PATCH 313/329] update --- rl3/td3.py | 343 ----------------------------------------------------- 1 file changed, 343 deletions(-) delete mode 100755 rl3/td3.py diff --git a/rl3/td3.py b/rl3/td3.py deleted file mode 100755 index fbec1095..00000000 --- 
a/rl3/td3.py +++ /dev/null @@ -1,343 +0,0 @@ -import numpy as np -import tensorflow as tf -import gym -import matplotlib.pyplot as plt -from datetime import datetime - -gym_minor_version = int(gym.__version__.split('.')[1]) -if gym_minor_version >= 19: - exit("Please install OpenAI Gym 0.19.0 or earlier") - -if tf.__version__.startswith('2'): - exit("Please install Tensorflow 1.x") - - -### avoid crashing on Mac -# doesn't seem to work -from sys import platform as sys_pf -if sys_pf == 'darwin': - import matplotlib - matplotlib.use("TkAgg") - - -# simple feedforward neural net -def ANN(x, layer_sizes, hidden_activation=tf.nn.relu, output_activation=None): - for h in layer_sizes[:-1]: - x = tf.layers.dense(x, units=h, activation=hidden_activation) - return tf.layers.dense(x, units=layer_sizes[-1], activation=output_activation) - - -# get all variables within a scope -def get_vars(scope): - return [x for x in tf.global_variables() if scope in x.name] - - -### Create both the actor and critic networks at once ### -### Q(s, mu(s)) returns the maximum Q for a given state s ### -def CreateNetworks( - s, a, - num_actions, - action_max, - hidden_sizes=(300,), - hidden_activation=tf.nn.relu, - output_activation=tf.tanh): - - with tf.variable_scope('mu'): - mu = action_max * ANN(s, list(hidden_sizes)+[num_actions], hidden_activation, output_activation) - with tf.variable_scope('q1'): - input_ = tf.concat([s, a], axis=-1) # (state, action) - q1 = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) - with tf.variable_scope('q2'): - input_ = tf.concat([s, a], axis=-1) # (state, action) - q2 = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) - with tf.variable_scope('q1', reuse=True): - # reuse is True, so it reuses the weights from the previously defined Q network - input_ = tf.concat([s, mu], axis=-1) # (state, mu(state)) - q1_mu = tf.squeeze(ANN(input_, list(hidden_sizes)+[1], hidden_activation, None), axis=1) - return mu, q1, q2, q1_mu - - -### The experience replay memory ### -class ReplayBuffer: - def __init__(self, obs_dim, act_dim, size): - self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) - self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) - self.acts_buf = np.zeros([size, act_dim], dtype=np.float32) - self.rews_buf = np.zeros(size, dtype=np.float32) - self.done_buf = np.zeros(size, dtype=np.float32) - self.ptr, self.size, self.max_size = 0, 0, size - - def store(self, obs, act, rew, next_obs, done): - self.obs1_buf[self.ptr] = obs - self.obs2_buf[self.ptr] = next_obs - self.acts_buf[self.ptr] = act - self.rews_buf[self.ptr] = rew - self.done_buf[self.ptr] = done - self.ptr = (self.ptr+1) % self.max_size - self.size = min(self.size+1, self.max_size) - - def sample_batch(self, batch_size=32): - idxs = np.random.randint(0, self.size, size=batch_size) - return dict(s=self.obs1_buf[idxs], - s2=self.obs2_buf[idxs], - a=self.acts_buf[idxs], - r=self.rews_buf[idxs], - d=self.done_buf[idxs]) - - -### Implement the TD3 algorithm ### -def td3( - env_fn, - ac_kwargs=dict(), - seed=0, - save_folder=None, - num_train_episodes=100, - test_agent_every=25, - replay_size=int(1e6), - gamma=0.99, - decay=0.995, - mu_lr=1e-3, - q_lr=1e-3, - batch_size=100, - start_steps=10000, - action_noise=0.1, - target_noise=0.2, - noise_clip=0.5, - policy_delay=2, - max_episode_length=1000): - - tf.set_random_seed(seed) - np.random.seed(seed) - - env, test_env = env_fn(), env_fn() - - # comment out this line if you don't want to record a video of 
the agent - if save_folder is not None: - test_env = gym.wrappers.Monitor(test_env, save_folder) - - # get size of state space and action space - num_states = env.observation_space.shape[0] - num_actions = env.action_space.shape[0] - - # Maximum value of action - # Assumes both low and high values are the same - # Assumes all actions have the same bounds - # May NOT be the case for all environments - action_max = env.action_space.high[0] - - # Create Tensorflow placeholders (neural network inputs) - X = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # state - A = tf.placeholder(dtype=tf.float32, shape=(None, num_actions)) # action - X2 = tf.placeholder(dtype=tf.float32, shape=(None, num_states)) # next state - R = tf.placeholder(dtype=tf.float32, shape=(None,)) # reward - D = tf.placeholder(dtype=tf.float32, shape=(None,)) # done - - # Main network outputs - with tf.variable_scope('main'): - mu, q1, q2, q1_mu = CreateNetworks(X, A, num_actions, action_max, **ac_kwargs) - - # Target networks - # First, get the output policy given next state X2 - with tf.variable_scope('target'): - # Note: "A" placeholder is effectively ignored - # since mu is only a function of state (X2) - mu_targ, _, _, _ = CreateNetworks(X2, A, num_actions, action_max, **ac_kwargs) - - # Next, add noise to mu_targ, before passing it through the target Q-networks - with tf.variable_scope('target', reuse=True): - # Add Gaussian noise and clip to valid action range - epsilon = tf.random_normal(tf.shape(mu_targ), stddev=target_noise) - epsilon = tf.clip_by_value(epsilon, -noise_clip, noise_clip) - A2 = mu_targ + epsilon - A2 = tf.clip_by_value(A2, -action_max, action_max) - - _, q1_targ, q2_targ, _ = CreateNetworks(X2, A2, num_actions, action_max, **ac_kwargs) - - # Experience replay memory - replay_buffer = ReplayBuffer(obs_dim=num_states, act_dim=num_actions, size=replay_size) - - - # Target value for the Q-network loss - # We use stop_gradient to tell Tensorflow not to differentiate - # Take the smaller of Q1 and Q2! - min_q_targ = tf.minimum(q1_targ, q2_targ) - q_target = tf.stop_gradient(R + gamma * (1 - D) * min_q_targ) - - # TD3 losses - mu_loss = -tf.reduce_mean(q1_mu) - q1_loss = tf.reduce_mean((q1 - q_target)**2) - q2_loss = tf.reduce_mean((q2 - q_target)**2) - q_loss = q1_loss + q2_loss # minimize simultaneously - - # Train policy and value separately - mu_optimizer = tf.train.AdamOptimizer(learning_rate=mu_lr) - q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr) - mu_train_op = mu_optimizer.minimize(mu_loss, var_list=get_vars('main/mu')) - q_train_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q')) - - # Use soft updates to update the target networks - target_update = tf.group( - [tf.assign(v_targ, decay*v_targ + (1 - decay)*v_main) - for v_main, v_targ in zip(get_vars('main'), get_vars('target')) - ] - ) - - # Copy main network params to target networks - target_init = tf.group( - [tf.assign(v_targ, v_main) - for v_main, v_targ in zip(get_vars('main'), get_vars('target')) - ] - ) - - # boilerplate (and copy to the target networks!) 
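# Note: the block above wires up the three TD3 ingredients:
#   (1) a clipped double-Q target, q_target = stop_gradient(R + gamma * (1 - D) * min(q1_targ, q2_targ)),
#       evaluated at A2 = clip(mu_targ(s2) + clipped Gaussian noise, -action_max, action_max),
#   (2) delayed policy and target updates, applied only every `policy_delay` Q-steps in the training loop below,
#   (3) Polyak-averaged target networks, v_targ <- decay * v_targ + (1 - decay) * v_main.
# The next three lines are standard TF1 boilerplate: create the session, initialize
# all variables, then copy the main-network weights into the target networks (target_init).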
- sess = tf.Session() - sess.run(tf.global_variables_initializer()) - sess.run(target_init) - - def get_action(s, noise_scale): - a = sess.run(mu, feed_dict={X: s.reshape(1,-1)})[0] - a += noise_scale * np.random.randn(num_actions) - return np.clip(a, -action_max, action_max) - - test_returns = [] - def test_agent(num_episodes=5): - t0 = datetime.now() - n_steps = 0 - for j in range(num_episodes): - s, episode_return, episode_length, d = test_env.reset(), 0, 0, False - while not (d or (episode_length == max_episode_length)): - # Take deterministic actions at test time (noise_scale=0) - test_env.render() - s, r, d, _ = test_env.step(get_action(s, 0)) - episode_return += r - episode_length += 1 - n_steps += 1 - print('test return:', episode_return, 'episode_length:', episode_length) - test_returns.append(episode_return) - # print("test steps per sec:", n_steps / (datetime.now() - t0).total_seconds()) - - - # Main loop: play episode and train - returns = [] - q_losses = [] - mu_losses = [] - num_steps = 0 - for i_episode in range(num_train_episodes): - - # reset env - s, episode_return, episode_length, d = env.reset(), 0, 0, False - - while not (d or (episode_length == max_episode_length)): - # For the first `start_steps` steps, use randomly sampled actions - # in order to encourage exploration. - if num_steps > start_steps: - a = get_action(s, action_noise) - else: - a = env.action_space.sample() - - # Keep track of the number of steps done - num_steps += 1 - if num_steps == start_steps: - print("USING AGENT ACTIONS NOW") - - # Step the env - s2, r, d, _ = env.step(a) - episode_return += r - episode_length += 1 - - # Ignore the "done" signal if it comes from hitting the time - # horizon (that is, when it's an artificial terminal signal - # that isn't based on the agent's state) - d_store = False if episode_length == max_episode_length else d - - # Store experience to replay buffer - replay_buffer.store(s, a, r, s2, d_store) - - # Assign next state to be the current state on the next round - s = s2 - - # Perform the updates - for j in range(episode_length): - batch = replay_buffer.sample_batch(batch_size) - feed_dict = { - X: batch['s'], - X2: batch['s2'], - A: batch['a'], - R: batch['r'], - D: batch['d'] - } - - # Q network update - # Note: plot the Q loss if you want - ql, _ = sess.run([q_loss, q_train_op], feed_dict) - q_losses.append(ql) - - # Policy update - # (And target networks update) - # Note: plot the mu loss if you want - if j % policy_delay == 0: - mul, _, _ = sess.run([mu_loss, mu_train_op, target_update], feed_dict) - mu_losses.append(mul) - - print("Episode:", i_episode + 1, "Return:", episode_return, 'episode_length:', episode_length) - returns.append(episode_return) - - # Test the agent - if i_episode > 0 and i_episode % test_agent_every == 0: - test_agent() - - # on Mac, plotting results in an error, so just save the results for later - # if you're not on Mac, feel free to uncomment the below lines - np.savez('td3_results.npz', train=returns, test=test_returns, q_losses=q_losses, mu_losses=mu_losses) - - # plt.plot(returns) - # plt.plot(smooth(np.array(returns))) - # plt.title("Train returns") - # plt.show() - - # plt.plot(test_returns) - # plt.plot(smooth(np.array(test_returns))) - # plt.title("Test returns") - # plt.show() - - # plt.plot(q_losses) - # plt.title('q_losses') - # plt.show() - - # plt.plot(mu_losses) - # plt.title('mu_losses') - # plt.show() - - -def smooth(x): - # last 100 - n = len(x) - y = np.zeros(n) - for i in range(n): - start = max(0, i - 99) - y[i] = 
float(x[start:(i+1)].sum()) / (i - start + 1) - return y - - -if __name__ == '__main__': - import argparse - parser = argparse.ArgumentParser() - # parser.add_argument('--env', type=str, default='HalfCheetah-v2') - parser.add_argument('--env', type=str, default='Pendulum-v0') - parser.add_argument('--hidden_layer_sizes', type=int, default=300) - parser.add_argument('--num_layers', type=int, default=1) - parser.add_argument('--gamma', type=float, default=0.99) - parser.add_argument('--seed', type=int, default=0) - parser.add_argument('--num_train_episodes', type=int, default=200) - parser.add_argument('--save_folder', type=str, default='td3_monitor') - args = parser.parse_args() - - - td3( - lambda : gym.make(args.env), - ac_kwargs=dict(hidden_sizes=[args.hidden_layer_sizes]*args.num_layers), - gamma=args.gamma, - seed=args.seed, - save_folder=args.save_folder, - num_train_episodes=args.num_train_episodes, - ) From 0622e2171f00d21c38ab1a5c894da9949fded8af Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 29 Apr 2024 15:09:46 -0400 Subject: [PATCH 314/329] update --- openai/extra_reading.txt | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/openai/extra_reading.txt b/openai/extra_reading.txt index 4d413ff9..776b62c7 100644 --- a/openai/extra_reading.txt +++ b/openai/extra_reading.txt @@ -11,4 +11,11 @@ Large Language Models are Zero-Shot Reasoners (CoT) https://arxiv.org/abs/2205.11916 Chain-of-Thought Prompting Elicits Reasoning in Large Language Models -https://arxiv.org/abs/2201.11903 \ No newline at end of file +https://arxiv.org/abs/2201.11903 + +A much better example of "ELI5" +https://www.reddit.com/r/ChatGPT/comments/1c5s51g/my_mother_and_i_had_difficulty_understanding_my + +What is RAG? — Retrieval-Augmented Generation Explained +https://medium.com/@lazyprogrammerofficial/what-is-rag-retrieval-augmented-generation-explained-148c8bb9c00f +https://lazyprogrammer.me/what-is-rag-retrieval-augmented-generation-explained/ \ No newline at end of file From abc83f5ce4c6cbee64f23611e8f9b9805accf5c5 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 14 May 2024 16:55:09 -0400 Subject: [PATCH 315/329] update --- supervised_class2/rf_regression.py | 1 + 1 file changed, 1 insertion(+) diff --git a/supervised_class2/rf_regression.py b/supervised_class2/rf_regression.py index 06ee72fb..ae31cef4 100644 --- a/supervised_class2/rf_regression.py +++ b/supervised_class2/rf_regression.py @@ -1,6 +1,7 @@ # https://deeplearningcourses.com/c/machine-learning-in-python-random-forest-adaboost # https://www.udemy.com/machine-learning-in-python-random-forest-adaboost # uses house dataset from https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ +# Alternate data source: https://archive.org/download/housing_202405/housing.data # put all files in the folder ../large_files from __future__ import print_function, division from future.utils import iteritems From 3aaa09b330024eb057c033193eaecc3f0f8f076b Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 24 May 2024 06:03:16 -0400 Subject: [PATCH 316/329] update --- hmm_class/sites.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/hmm_class/sites.py b/hmm_class/sites.py index 617863f5..0187e03a 100644 --- a/hmm_class/sites.py +++ b/hmm_class/sites.py @@ -2,6 +2,8 @@ # https://udemy.com/unsupervised-machine-learning-hidden-markov-models-in-python # http://lazyprogrammer.me # Create a Markov model for site data. 
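# Note: the additions below port this script to Python 3: print statements become
# print() calls, and dict.iteritems() is replaced by future.utils.iteritems(), which
# works under both Python 2 and Python 3.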
+from __future__ import print_function, division +from future.utils import iteritems import numpy as np transitions = {} @@ -14,19 +16,19 @@ row_sums[s] = row_sums.get(s, 0.) + 1 # normalize -for k, v in transitions.iteritems(): +for k, v in iteritems(transitions): s, e = k transitions[k] = v / row_sums[s] # initial state distribution -print "initial state distribution:" -for k, v in transitions.iteritems(): +print("initial state distribution:") +for k, v in iteritems(transitions): s, e = k if s == '-1': - print e, v + print(e, v) # which page has the highest bounce? -for k, v in transitions.iteritems(): +for k, v in iteritems(transitions): s, e = k if e == 'B': - print "bounce rate for %s: %s" % (s, v) + print("bounce rate for %s: %s" % (s, v)) From bf9388ae8c0ebc4a5d8ce0c630dbbad9af638052 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 23 Jul 2024 01:14:09 -0400 Subject: [PATCH 317/329] update --- rl2/mountaincar/n_step.py | 13 ++++++------- rl2/mountaincar/q_learning.py | 10 +++------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/rl2/mountaincar/n_step.py b/rl2/mountaincar/n_step.py index 9fe0dd94..628fdbcf 100644 --- a/rl2/mountaincar/n_step.py +++ b/rl2/mountaincar/n_step.py @@ -24,10 +24,6 @@ import q_learning from q_learning import plot_cost_to_go, FeatureTransformer, Model, plot_running_avg -gym_minor_version = int(gym.__version__.split('.')[1]) -if gym_minor_version >= 19: - exit("Please install OpenAI Gym 0.19.0 or earlier") - class SGDRegressor: def __init__(self, **kwargs): @@ -58,7 +54,7 @@ def predict(self, X): # returns a list of states_and_rewards, and the total reward def play_one(model, eps, gamma, n=5): - observation = env.reset() + observation = env.reset()[0] done = False totalreward = 0 rewards = [] @@ -77,7 +73,7 @@ def play_one(model, eps, gamma, n=5): actions.append(action) prev_observation = observation - observation, reward, done, info = env.step(action) + observation, reward, done, truncated, info = env.step(action) rewards.append(reward) @@ -85,7 +81,10 @@ def play_one(model, eps, gamma, n=5): if len(rewards) >= n: # return_up_to_prediction = calculate_return_before_prediction(rewards, gamma) return_up_to_prediction = multiplier.dot(rewards[-n:]) - G = return_up_to_prediction + (gamma**n)*np.max(model.predict(observation)[0]) + action_values = model.predict(observation)[0] + # print("action_values.shape:", action_values.shape) + G = return_up_to_prediction + (gamma**n)*np.max(action_values) + # print("G:", G) model.update(states[-n], actions[-n], G) # if len(rewards) > n: diff --git a/rl2/mountaincar/q_learning.py b/rl2/mountaincar/q_learning.py index 295d72aa..129d67e0 100755 --- a/rl2/mountaincar/q_learning.py +++ b/rl2/mountaincar/q_learning.py @@ -27,10 +27,6 @@ from sklearn.kernel_approximation import RBFSampler from sklearn.linear_model import SGDRegressor -gym_minor_version = int(gym.__version__.split('.')[1]) -if gym_minor_version >= 19: - exit("Please install OpenAI Gym 0.19.0 or earlier") - # SGDRegressor defaults: # loss='squared_loss', penalty='l2', alpha=0.0001, @@ -74,7 +70,7 @@ def __init__(self, env, feature_transformer, learning_rate): self.feature_transformer = feature_transformer for i in range(env.action_space.n): model = SGDRegressor(learning_rate=learning_rate) - model.partial_fit(feature_transformer.transform( [env.reset()] ), [0]) + model.partial_fit(feature_transformer.transform( [env.reset()[0]] ), [0]) self.models.append(model) def predict(self, s): @@ -103,14 +99,14 @@ def sample_action(self, s, eps): # returns a list 
of states_and_rewards, and the total reward def play_one(model, env, eps, gamma): - observation = env.reset() + observation = env.reset()[0] done = False totalreward = 0 iters = 0 while not done and iters < 10000: action = model.sample_action(observation, eps) prev_observation = observation - observation, reward, done, info = env.step(action) + observation, reward, done, truncated, info = env.step(action) # update the model if done: From c096c5a862f821b416e9c9a6a9a7c5c6eb030615 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 13 Aug 2024 05:40:58 -0400 Subject: [PATCH 318/329] update --- unsupervised_class/tweets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsupervised_class/tweets.py b/unsupervised_class/tweets.py index aeb4552a..6ffba008 100644 --- a/unsupervised_class/tweets.py +++ b/unsupervised_class/tweets.py @@ -66,7 +66,7 @@ def filter_tweet(s): # transform the text into a data matrix tfidf = TfidfVectorizer(max_features=100, stop_words=stopwords) -X = tfidf.fit_transform(text).todense() +X = tfidf.fit_transform(text).asformat('array') # subsample for efficiency From 4442e4a97e28eb8ea78aadadfc15bd113d190dba Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 22 Oct 2024 06:04:37 -0400 Subject: [PATCH 319/329] update --- probability/WHERE ARE THE NOTEBOOKS.txt | 5 +++++ probability/extra_reading.txt | 2 ++ 2 files changed, 7 insertions(+) create mode 100644 probability/WHERE ARE THE NOTEBOOKS.txt create mode 100644 probability/extra_reading.txt diff --git a/probability/WHERE ARE THE NOTEBOOKS.txt b/probability/WHERE ARE THE NOTEBOOKS.txt new file mode 100644 index 00000000..5446ce25 --- /dev/null +++ b/probability/WHERE ARE THE NOTEBOOKS.txt @@ -0,0 +1,5 @@ +As stated in the "where to get the code" / "where to get the notebooks" lecture, the notebooks are NOT on Github. + +If you missed this, please review the lecture for the actual location of the notebooks. + +If, after reviewing it, you still need assistance, please contact info@deeplearningcourses.com. 
\ No newline at end of file diff --git a/probability/extra_reading.txt b/probability/extra_reading.txt new file mode 100644 index 00000000..e2df1a0d --- /dev/null +++ b/probability/extra_reading.txt @@ -0,0 +1,2 @@ +Multivariate Change of Variables +https://math.libretexts.org/Bookshelves/Calculus/Book%3A_Active_Calculus_(Boelkins_et_al.)/11%3A_Multiple_Integrals/11.09%3A_Change_of_Variables \ No newline at end of file From dc6914a516d30b3936bfac352dcb5e24e1cd6e29 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 18 Dec 2024 01:18:28 -0500 Subject: [PATCH 320/329] update --- rl/extra_reading.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rl/extra_reading.txt b/rl/extra_reading.txt index 64dd9812..fac79d64 100644 --- a/rl/extra_reading.txt +++ b/rl/extra_reading.txt @@ -1,6 +1,9 @@ Finite-time Analysis of the Multiarmed Bandit Problem https://homes.di.unimi.it/cesa-bianchi/Pubblicazioni/ml-02.pdf +A Nice Lecture for Students Who Claim "RL Doesn't Use Math" +https://www.youtube.com/watch?v=dhEF5pfYmvc + Hacking Google reCAPTCHA v3 using Reinforcement Learning https://arxiv.org/pdf/1903.01003.pdf From da737182029b5a16e014493ea88db8b60eadfa45 Mon Sep 17 00:00:00 2001 From: Bob Date: Sun, 5 Jan 2025 03:25:37 -0500 Subject: [PATCH 321/329] readme --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 88841a1f..b75ae2dd 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,10 @@ https://deeplearningcourses.com/c/deep-learning-tensorflow-2 https://deeplearningcourses.com/c/linear-algebra-data-science +**Math 0-1: Probability for Data Science & Machine Learning** + +https://deeplearningcourses.com/c/probability-data-science-machine-learning + Deep Learning Courses Exclusives ================================ @@ -91,6 +95,9 @@ https://deeplearningcourses.com/c/matlab Other Course Links ================== +Generative AI: ChatGPT & OpenAI LLMs in Python +https://deeplearningcourses.com/c/genai-openai-chatgpt + Math 0-1: Matrix Calculus for Data Science & Machine Learning https://deeplearningcourses.com/c/matrix-calculus-machine-learning From 8d735ed8e5748ee064ee77f080c724a76509c4ce Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 18 Feb 2025 15:31:20 -0500 Subject: [PATCH 322/329] update --- tf2.0/keras_trader.py | 421 ++++++++++++++++++++++++++++++++++++++++++ tf2.0/mlp_trader.py | 401 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 822 insertions(+) create mode 100644 tf2.0/keras_trader.py create mode 100644 tf2.0/mlp_trader.py diff --git a/tf2.0/keras_trader.py b/tf2.0/keras_trader.py new file mode 100644 index 00000000..21d693e1 --- /dev/null +++ b/tf2.0/keras_trader.py @@ -0,0 +1,421 @@ +import numpy as np +import pandas as pd + +# must do this BEFORE importing keras +import os +os.environ["KERAS_BACKEND"] = "jax" + +from keras.models import Model +from keras.layers import Dense, Input +from keras.optimizers import Adam + +from datetime import datetime +import itertools +import argparse +import re +import pickle + +from sklearn.preprocessing import StandardScaler + + +import keras.backend as K +print("Using backend:", K.backend()) + +# import tensorflow as tf +# if tf.__version__.startswith('2'): +# tf.compat.v1.disable_eager_execution() + + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class 
ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +def mlp(input_dim, n_action, n_hidden_layers=1, hidden_dim=32): + """ A multi-layer perceptron """ + + # input layer + i = Input(shape=(input_dim,)) + x = i + + # hidden layers + for _ in range(n_hidden_layers): + x = Dense(hidden_dim, activation='relu')(x) + + # final layer + x = Dense(n_action)(x) + + # make the model + model = Model(i, x) + + model.compile(loss='mse', optimizer='adam') + print((model.summary())) + return model + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. + State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. 
go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. [2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = mlp(state_size, action_size) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state, verbose=0) + return np.argmax(act_values[0]) # returns action + + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if self.memory.size < batch_size: + return + + # sample a batch of data from the replay memory + minibatch = self.memory.sample_batch(batch_size) + states = minibatch['s'] + actions = minibatch['a'] + rewards = minibatch['r'] + next_states = minibatch['s2'] + done = minibatch['d'] + + # Calculate the tentative target: Q(s',a) + target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states, verbose=0), axis=1) + + # With the Keras API, the target (usually) must have the same + # shape as the predictions. + # However, we only need to update the network for the actions + # which were actually taken. + # We can accomplish this by setting the target to be equal to + # the prediction for all values. 
+ # Then, only change the targets for the actions taken. + # Q(s,a) + target_full = self.model.predict(states, verbose=0) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + self.model.train_on_batch(states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + self.model.load_weights(name) + + + def save(self, name): + self.model.save_weights(name) + + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + model_file = 'dqn.weights.h5' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/{model_file}') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/{model_file}') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) diff --git a/tf2.0/mlp_trader.py b/tf2.0/mlp_trader.py new file mode 100644 index 00000000..91b3463b --- /dev/null +++ b/tf2.0/mlp_trader.py @@ -0,0 +1,401 @@ +import numpy as np +import pandas as pd + +from sklearn.neural_network import MLPRegressor +from sklearn.preprocessing import StandardScaler + +from datetime import datetime +import itertools +import argparse +import re +import os +import pickle + + +# Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) +def get_data(): + # returns a T x 3 list of stock prices + # each row is a different stock + # 0 = AAPL + # 1 = MSI + # 2 = SBUX + df = pd.read_csv('aapl_msi_sbux.csv') + return df.values + + + +### The experience replay memory ### +class ReplayBuffer: + def __init__(self, obs_dim, act_dim, size): + self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32) + self.acts_buf = np.zeros(size, dtype=np.uint8) + self.rews_buf = np.zeros(size, dtype=np.float32) + self.done_buf = np.zeros(size, dtype=np.uint8) + self.ptr, self.size, self.max_size = 0, 0, size + + def store(self, obs, act, rew, next_obs, done): + self.obs1_buf[self.ptr] = obs + self.obs2_buf[self.ptr] = next_obs + self.acts_buf[self.ptr] = act + self.rews_buf[self.ptr] = rew + self.done_buf[self.ptr] = done + self.ptr = (self.ptr+1) % self.max_size + self.size = min(self.size+1, self.max_size) + + def sample_batch(self, batch_size=32): + idxs = np.random.randint(0, self.size, size=batch_size) + return dict(s=self.obs1_buf[idxs], + s2=self.obs2_buf[idxs], + a=self.acts_buf[idxs], + r=self.rews_buf[idxs], + d=self.done_buf[idxs]) + + + + + +def get_scaler(env): + # return scikit-learn scaler object to scale the states + # Note: you could also populate the replay buffer here + + states = [] + for _ in range(env.n_step): + action = np.random.choice(env.action_space) + state, reward, done, info = env.step(action) + states.append(state) + if done: + break + + scaler = StandardScaler() + scaler.fit(states) + return scaler + + + + +def maybe_make_dir(directory): + if not os.path.exists(directory): + os.makedirs(directory) + + + + +def mlp(input_dim, n_action, n_hidden_layers=1, hidden_dim=32): + """ A multi-layer perceptron """ + + model = MLPRegressor( + hidden_layer_sizes=n_hidden_layers * [hidden_dim], + ) + + # since we'll be first using this to make a prediction with random weights + # we need to know the output size + + # so we'll just start by fitting on some dummy data + X = np.random.randn(100, input_dim) + Y = np.random.randn(100, n_action) + model.partial_fit(X, Y) + + return model + + + + +class MultiStockEnv: + """ + A 3-stock trading environment. 
+ State: vector of size 7 (n_stock * 2 + 1) + - # shares of stock 1 owned + - # shares of stock 2 owned + - # shares of stock 3 owned + - price of stock 1 (using daily close price) + - price of stock 2 + - price of stock 3 + - cash owned (can be used to purchase more stocks) + Action: categorical variable with 27 (3^3) possibilities + - for each stock, you can: + - 0 = sell + - 1 = hold + - 2 = buy + """ + def __init__(self, data, initial_investment=20000): + # data + self.stock_price_history = data + self.n_step, self.n_stock = self.stock_price_history.shape + + # instance attributes + self.initial_investment = initial_investment + self.cur_step = None + self.stock_owned = None + self.stock_price = None + self.cash_in_hand = None + + self.action_space = np.arange(3**self.n_stock) + + # action permutations + # returns a nested list with elements like: + # [0,0,0] + # [0,0,1] + # [0,0,2] + # [0,1,0] + # [0,1,1] + # etc. + # 0 = sell + # 1 = hold + # 2 = buy + self.action_list = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock))) + + # calculate size of state + self.state_dim = self.n_stock * 2 + 1 + + self.reset() + + + def reset(self): + self.cur_step = 0 + self.stock_owned = np.zeros(self.n_stock) + self.stock_price = self.stock_price_history[self.cur_step] + self.cash_in_hand = self.initial_investment + return self._get_obs() + + + def step(self, action): + assert action in self.action_space + + # get current value before performing the action + prev_val = self._get_val() + + # update price, i.e. go to the next day + self.cur_step += 1 + self.stock_price = self.stock_price_history[self.cur_step] + + # perform the trade + self._trade(action) + + # get the new value after taking the action + cur_val = self._get_val() + + # reward is the increase in porfolio value + reward = cur_val - prev_val + + # done if we have run out of data + done = self.cur_step == self.n_step - 1 + + # store the current value of the portfolio here + info = {'cur_val': cur_val} + + # conform to the Gym API + return self._get_obs(), reward, done, info + + + def _get_obs(self): + obs = np.empty(self.state_dim) + obs[:self.n_stock] = self.stock_owned + obs[self.n_stock:2*self.n_stock] = self.stock_price + obs[-1] = self.cash_in_hand + return obs + + + + def _get_val(self): + return self.stock_owned.dot(self.stock_price) + self.cash_in_hand + + + def _trade(self, action): + # index the action we want to perform + # 0 = sell + # 1 = hold + # 2 = buy + # e.g. 
[2,1,0] means: + # buy first stock + # hold second stock + # sell third stock + action_vec = self.action_list[action] + + # determine which stocks to buy or sell + sell_index = [] # stores index of stocks we want to sell + buy_index = [] # stores index of stocks we want to buy + for i, a in enumerate(action_vec): + if a == 0: + sell_index.append(i) + elif a == 2: + buy_index.append(i) + + # sell any stocks we want to sell + # then buy any stocks we want to buy + if sell_index: + # NOTE: to simplify the problem, when we sell, we will sell ALL shares of that stock + for i in sell_index: + self.cash_in_hand += self.stock_price[i] * self.stock_owned[i] + self.stock_owned[i] = 0 + if buy_index: + # NOTE: when buying, we will loop through each stock we want to buy, + # and buy one share at a time until we run out of cash + can_buy = True + while can_buy: + for i in buy_index: + if self.cash_in_hand > self.stock_price[i]: + self.stock_owned[i] += 1 # buy one share + self.cash_in_hand -= self.stock_price[i] + else: + can_buy = False + + + + + +class DQNAgent(object): + def __init__(self, state_size, action_size): + self.state_size = state_size + self.action_size = action_size + self.memory = ReplayBuffer(state_size, action_size, size=500) + self.gamma = 0.95 # discount rate + self.epsilon = 1.0 # exploration rate + self.epsilon_min = 0.01 + self.epsilon_decay = 0.995 + self.model = mlp(state_size, action_size) + + + def update_replay_memory(self, state, action, reward, next_state, done): + self.memory.store(state, action, reward, next_state, done) + + + def act(self, state): + if np.random.rand() <= self.epsilon: + return np.random.choice(self.action_size) + act_values = self.model.predict(state) + return np.argmax(act_values[0]) # returns action + + def replay(self, batch_size=32): + # first check if replay buffer contains enough data + if self.memory.size < batch_size: + return + + # sample a batch of data from the replay memory + minibatch = self.memory.sample_batch(batch_size) + states = minibatch['s'] + actions = minibatch['a'] + rewards = minibatch['r'] + next_states = minibatch['s2'] + done = minibatch['d'] + + # Calculate the tentative target: Q(s',a) + target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states), axis=1) + + # With the Keras API, the target (usually) must have the same + # shape as the predictions. + # However, we only need to update the network for the actions + # which were actually taken. + # We can accomplish this by setting the target to be equal to + # the prediction for all values. + # Then, only change the targets for the actions taken. 
+ # Q(s,a) + target_full = self.model.predict(states) + target_full[np.arange(batch_size), actions] = target + + # Run one training step + self.model.partial_fit(states, target_full) + + if self.epsilon > self.epsilon_min: + self.epsilon *= self.epsilon_decay + + + def load(self, name): + with open(name, "rb") as f: + self.model = pickle.load(f) + + + def save(self, name): + with open(name, "wb") as f: + pickle.dump(self.model, f) + + +def play_one_episode(agent, env, is_train): + # note: after transforming states are already 1xD + state = env.reset() + state = scaler.transform([state]) + done = False + + while not done: + action = agent.act(state) + next_state, reward, done, info = env.step(action) + next_state = scaler.transform([next_state]) + if is_train == 'train': + agent.update_replay_memory(state, action, reward, next_state, done) + agent.replay(batch_size) + state = next_state + + return info['cur_val'] + + + +if __name__ == '__main__': + + # config + models_folder = 'rl_trader_models' + rewards_folder = 'rl_trader_rewards' + num_episodes = 2000 + batch_size = 32 + initial_investment = 20000 + + + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--mode', type=str, required=True, + help='either "train" or "test"') + args = parser.parse_args() + + maybe_make_dir(models_folder) + maybe_make_dir(rewards_folder) + + data = get_data() + n_timesteps, n_stocks = data.shape + + n_train = n_timesteps // 2 + + train_data = data[:n_train] + test_data = data[n_train:] + + env = MultiStockEnv(train_data, initial_investment) + state_size = env.state_dim + action_size = len(env.action_space) + agent = DQNAgent(state_size, action_size) + scaler = get_scaler(env) + + # store the final value of the portfolio (end of episode) + portfolio_value = [] + + if args.mode == 'test': + # then load the previous scaler + with open(f'{models_folder}/scaler.pkl', 'rb') as f: + scaler = pickle.load(f) + + # remake the env with test data + env = MultiStockEnv(test_data, initial_investment) + + # make sure epsilon is not 1! 
+ # no need to run multiple episodes if epsilon = 0, it's deterministic + agent.epsilon = 0.01 + + # load trained weights + agent.load(f'{models_folder}/mlp.pkl') + + # play the game num_episodes times + for e in range(num_episodes): + t0 = datetime.now() + val = play_one_episode(agent, env, args.mode) + dt = datetime.now() - t0 + print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}") + portfolio_value.append(val) # append episode end portfolio value + + # save the weights when we are done + if args.mode == 'train': + # save the DQN + agent.save(f'{models_folder}/mlp.pkl') + + # save the scaler + with open(f'{models_folder}/scaler.pkl', 'wb') as f: + pickle.dump(scaler, f) + + + # save portfolio value for each episode + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) From 2049f4e078c556ed63240692c89382faec5876a9 Mon Sep 17 00:00:00 2001 From: Bob Date: Wed, 19 Feb 2025 02:23:27 -0500 Subject: [PATCH 323/329] update --- tf2.0/rl_trader.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/tf2.0/rl_trader.py b/tf2.0/rl_trader.py index 2f98b964..6cac6b29 100644 --- a/tf2.0/rl_trader.py +++ b/tf2.0/rl_trader.py @@ -15,6 +15,11 @@ from sklearn.preprocessing import StandardScaler +import tensorflow as tf +# if tf.__version__.startswith('2'): +# tf.compat.v1.disable_eager_execution() + + # Let's use AAPL (Apple), MSI (Motorola), SBUX (Starbucks) def get_data(): # returns a T x 3 list of stock prices @@ -270,10 +275,10 @@ def update_replay_memory(self, state, action, reward, next_state, done): def act(self, state): if np.random.rand() <= self.epsilon: return np.random.choice(self.action_size) - act_values = self.model.predict(state) + act_values = self.model.predict(state, verbose=0) return np.argmax(act_values[0]) # returns action - + @tf.function def replay(self, batch_size=32): # first check if replay buffer contains enough data if self.memory.size < batch_size: @@ -288,7 +293,7 @@ def replay(self, batch_size=32): done = minibatch['d'] # Calculate the tentative target: Q(s',a) - target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states), axis=1) + target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states, verbose=0), axis=1) # With the Keras API, the target (usually) must have the same # shape as the predictions. @@ -298,7 +303,7 @@ def replay(self, batch_size=32): # the prediction for all values. # Then, only change the targets for the actions taken. 
# Q(s,a) - target_full = self.model.predict(states) + target_full = self.model.predict(states, verbose=0) target_full[np.arange(batch_size), actions] = target # Run one training step @@ -316,6 +321,7 @@ def save(self, name): self.model.save_weights(name) + def play_one_episode(agent, env, is_train): # note: after transforming states are already 1xD state = env.reset() @@ -340,6 +346,7 @@ def play_one_episode(agent, env, is_train): # config models_folder = 'rl_trader_models' rewards_folder = 'rl_trader_rewards' + model_file = 'dqn.weights.h5' num_episodes = 2000 batch_size = 32 initial_investment = 20000 @@ -383,7 +390,7 @@ def play_one_episode(agent, env, is_train): agent.epsilon = 0.01 # load trained weights - agent.load(f'{models_folder}/dqn.h5') + agent.load(f'{models_folder}/{model_file}') # play the game num_episodes times for e in range(num_episodes): @@ -396,7 +403,7 @@ def play_one_episode(agent, env, is_train): # save the weights when we are done if args.mode == 'train': # save the DQN - agent.save(f'{models_folder}/dqn.h5') + agent.save(f'{models_folder}/{model_file}') # save the scaler with open(f'{models_folder}/scaler.pkl', 'wb') as f: From d1421773718449281c6e67fa0141fba1deadc09b Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 15 May 2025 03:17:39 -0400 Subject: [PATCH 324/329] rl2v2 --- rl2v2/extra_reading.txt | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 rl2v2/extra_reading.txt diff --git a/rl2v2/extra_reading.txt b/rl2v2/extra_reading.txt new file mode 100644 index 00000000..b1b113f2 --- /dev/null +++ b/rl2v2/extra_reading.txt @@ -0,0 +1,8 @@ +Gymnasium Library +https://gymnasium.farama.org/ + +Stable Baselines 3 +https://github.com/DLR-RM/stable-baselines3 + +Reinforcement Learning Prerequisites +https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python \ No newline at end of file From 679383156fea7b34631d59a467d7c1afac519406 Mon Sep 17 00:00:00 2001 From: Bob Date: Mon, 19 May 2025 22:44:26 -0400 Subject: [PATCH 325/329] update --- recommenders/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/recommenders/extra_reading.txt b/recommenders/extra_reading.txt index 21d09a4b..510410cd 100644 --- a/recommenders/extra_reading.txt +++ b/recommenders/extra_reading.txt @@ -56,4 +56,7 @@ AutoRec: Autoencoders Meet Collaborative Filtering http://users.cecs.anu.edu.au/~u5098633/papers/www15.pdf Collaborative Filtering for Implicit Feedback Datasets -http://yifanhu.net/PUB/cf.pdf \ No newline at end of file +http://yifanhu.net/PUB/cf.pdf + +Neural Collaborative Filtering +https://arxiv.org/abs/1708.05031 \ No newline at end of file From 02d411548e2edb03d0101c00a20f32334059e60a Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 27 Jun 2025 02:42:48 -0400 Subject: [PATCH 326/329] update --- tf2.0/keras_trader.py | 6 +++--- tf2.0/mlp_trader.py | 6 +++--- tf2.0/rl_trader.py | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tf2.0/keras_trader.py b/tf2.0/keras_trader.py index 21d693e1..18545989 100644 --- a/tf2.0/keras_trader.py +++ b/tf2.0/keras_trader.py @@ -184,13 +184,13 @@ def step(self, action): # get current value before performing the action prev_val = self._get_val() + # perform the trade + self._trade(action) + # update price, i.e. 
go to the next day self.cur_step += 1 self.stock_price = self.stock_price_history[self.cur_step] - # perform the trade - self._trade(action) - # get the new value after taking the action cur_val = self._get_val() diff --git a/tf2.0/mlp_trader.py b/tf2.0/mlp_trader.py index 91b3463b..e5d53e56 100644 --- a/tf2.0/mlp_trader.py +++ b/tf2.0/mlp_trader.py @@ -165,13 +165,13 @@ def step(self, action): # get current value before performing the action prev_val = self._get_val() + # perform the trade + self._trade(action) + # update price, i.e. go to the next day self.cur_step += 1 self.stock_price = self.stock_price_history[self.cur_step] - # perform the trade - self._trade(action) - # get the new value after taking the action cur_val = self._get_val() diff --git a/tf2.0/rl_trader.py b/tf2.0/rl_trader.py index 6cac6b29..b5849494 100644 --- a/tf2.0/rl_trader.py +++ b/tf2.0/rl_trader.py @@ -177,13 +177,13 @@ def step(self, action): # get current value before performing the action prev_val = self._get_val() + # perform the trade + self._trade(action) + # update price, i.e. go to the next day self.cur_step += 1 self.stock_price = self.stock_price_history[self.cur_step] - # perform the trade - self._trade(action) - # get the new value after taking the action cur_val = self._get_val() @@ -411,4 +411,4 @@ def play_one_episode(agent, env, is_train): # save portfolio value for each episode - np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) + np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) \ No newline at end of file From f2937d2b6900a87e96e499105964d79c7c11163b Mon Sep 17 00:00:00 2001 From: Bob Date: Fri, 4 Jul 2025 23:42:03 -0400 Subject: [PATCH 327/329] update --- cnn_class2/extra_reading.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cnn_class2/extra_reading.txt b/cnn_class2/extra_reading.txt index 28c1a1ae..593bc0cc 100644 --- a/cnn_class2/extra_reading.txt +++ b/cnn_class2/extra_reading.txt @@ -14,4 +14,7 @@ Going Deeper with Convolutions (Inception) https://arxiv.org/abs/1409.4842 Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift -https://arxiv.org/abs/1502.03167 \ No newline at end of file +https://arxiv.org/abs/1502.03167 + +Deep learning improved by biological activation functions +https://arxiv.org/pdf/1804.11237.pdf \ No newline at end of file From 36272466ed6120fbfab819673e7437d1e48f50d9 Mon Sep 17 00:00:00 2001 From: Bob Date: Tue, 5 Aug 2025 04:22:06 -0400 Subject: [PATCH 328/329] update --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index b75ae2dd..e5075916 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,11 @@ Beginning with Tensorflow 2, I started to use Google Colab. 
For those courses, u VIP Course Links =================== +**Advanced AI: Deep Reinforcement Learning in PyTorch (v2)** + +https://deeplearningcourses.com/c/deep-reinforcement-learning-in-pytorch + + **Data Science: Transformers for Natural Language Processing** https://deeplearningcourses.com/c/data-science-transformers-nlp From 37a6606c989094ac9055d5a85aa1b2890869afe4 Mon Sep 17 00:00:00 2001 From: Bob Date: Thu, 18 Sep 2025 05:23:23 -0400 Subject: [PATCH 329/329] update --- rl3v2/extra_reading.txt | 23 ++++++++++++ rl3v2/visualize_es.py | 59 ++++++++++++++++++++++++++++++ rl3v2/visualize_hill_climbing.py | 61 ++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) create mode 100644 rl3v2/extra_reading.txt create mode 100644 rl3v2/visualize_es.py create mode 100644 rl3v2/visualize_hill_climbing.py diff --git a/rl3v2/extra_reading.txt b/rl3v2/extra_reading.txt new file mode 100644 index 00000000..cdff9892 --- /dev/null +++ b/rl3v2/extra_reading.txt @@ -0,0 +1,23 @@ +=== PART 1 === + +ES (Evolution Strategies) +"Evolution Strategies as a Scalable Alternative to Reinforcement Learning" +https://arxiv.org/abs/1703.03864 + +Trust Region Evolution Strategies +https://www.microsoft.com/en-us/research/uploads/prod/2018/11/trust-region-evolution-strategies.pdf + +The CMA Evolution Strategy: A Tutorial +https://arxiv.org/pdf/1604.00772 + +Simple random search provides a competitive approach to reinforcement learning (Augmented Random Search) +https://arxiv.org/abs/1803.07055 + +=== PART 2 === + +DDPG (Deep Deterministic Policy Gradient) +"Continuous control with deep reinforcement learning" +https://arxiv.org/abs/1509.02971 + +Deterministic Policy Gradient Algorithms +http://proceedings.mlr.press/v32/silver14.pdf \ No newline at end of file diff --git a/rl3v2/visualize_es.py b/rl3v2/visualize_es.py new file mode 100644 index 00000000..e518c388 --- /dev/null +++ b/rl3v2/visualize_es.py @@ -0,0 +1,59 @@ +import numpy as np +import matplotlib.pyplot as plt + +# Objective function to minimize (you can change this) +def f(x, y): + # return np.sin(x) + np.cos(y) + return -((x - 1)**2 + y**2) + +# Evolution Strategies optimizer (simple version) +def evolution_strategies( + f, bounds, pop_size=50, sigma=0.3, alpha=0.03, iterations=100 +): + dim = 2 + mu = np.random.uniform(bounds[0], bounds[1], size=dim) + + history = [] + + for gen in range(iterations): + # Sample noise + noise = np.random.randn(pop_size, dim) + population = mu + sigma * noise + fitness = np.array([f(x[0], x[1]) for x in population]) + + history.append((population.copy(), mu.copy())) + + # Normalize fitness for weighting + fitness_norm = (fitness - np.mean(fitness)) / (np.std(fitness) + 1e-8) + mu += alpha / (pop_size * sigma) * np.dot(noise.T, fitness_norm) + + return history + +# Visualization function +def visualize_es(history, bounds, f, resolution=100): + x = np.linspace(bounds[0], bounds[1], resolution) + y = np.linspace(bounds[0], bounds[1], resolution) + X, Y = np.meshgrid(x, y) + Z = f(X, Y) + + plt.figure(figsize=(8, 6)) + for i, (pop, mu) in enumerate(history): + plt.clf() + plt.contourf(X, Y, Z, levels=50, cmap='viridis') + plt.colorbar(label="f(x, y)") + plt.scatter(pop[:, 0], pop[:, 1], c='white', s=20, label='Population') + plt.scatter(mu[0], mu[1], c='red', s=80, label='Mean', edgecolors='black') + plt.title(f"Evolution Strategies - Step {i+1}") + plt.xlim(bounds[0], bounds[1]) + plt.ylim(bounds[0], bounds[1]) + plt.xlabel('x') + plt.ylabel('y') + plt.legend() + # plt.pause(0.1) + plt.waitforbuttonpress() + 
plt.show()
+
+# Run
+bounds = (-5, 5)
+history = evolution_strategies(f, bounds, iterations=80)
+visualize_es(history, bounds, f)
diff --git a/rl3v2/visualize_hill_climbing.py b/rl3v2/visualize_hill_climbing.py
new file mode 100644
index 00000000..d640a20e
--- /dev/null
+++ b/rl3v2/visualize_hill_climbing.py
@@ -0,0 +1,61 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Objective function to minimize (you can change this)
+def f(x, y):
+    # return np.sin(x) + np.cos(y)
+    return -((x - 1)**2 + y**2)
+
+# Hill climbing optimizer (simple version)
+def hill_climb(
+    f, bounds, pop_size=1, sigma=0.3, alpha=0.3, iterations=100
+):
+    dim = 2
+    mu = np.random.uniform(bounds[0], bounds[1], size=dim)
+
+    history = []
+    best_f = f(mu[0], mu[1])
+
+    for gen in range(iterations):
+        # Sample noise
+        noise = np.random.randn(pop_size, dim)
+        population = mu + sigma * noise
+        fitness = np.array([f(x[0], x[1]) for x in population])
+
+        history.append((population.copy(), mu.copy()))
+
+        # Update point if it's better
+        if fitness[0] > best_f:
+            best_f = fitness[0]
+            mu = population.flatten()
+
+    return history
+
+# Visualization function
+def visualize_es(history, bounds, f, resolution=100):
+    x = np.linspace(bounds[0], bounds[1], resolution)
+    y = np.linspace(bounds[0], bounds[1], resolution)
+    X, Y = np.meshgrid(x, y)
+    Z = f(X, Y)
+
+    plt.figure(figsize=(8, 6))
+    for i, (pop, mu) in enumerate(history):
+        plt.clf()
+        plt.contourf(X, Y, Z, levels=50, cmap='viridis')
+        plt.colorbar(label="f(x, y)")
+        plt.scatter(pop[:, 0], pop[:, 1], c='white', s=20, label='Population')
+        plt.scatter(mu[0], mu[1], c='red', s=80, label='Mean', edgecolors='black')
+        plt.title(f"Hill Climbing - Step {i+1}")
+        plt.xlim(bounds[0], bounds[1])
+        plt.ylim(bounds[0], bounds[1])
+        plt.xlabel('x')
+        plt.ylabel('y')
+        plt.legend()
+        # plt.pause(0.1)
+        plt.waitforbuttonpress()
+    plt.show()
+
+# Run
+bounds = (-5, 5)
+history = hill_climb(f, bounds, iterations=80)
+visualize_es(history, bounds, f)
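
Both rl3v2 visualization scripts above block on plt.waitforbuttonpress() for every frame, which does not work on a headless machine and echoes the recurring "plotting errors out on Mac, so save the results instead" workaround used elsewhere in this repo. The sketch below is an illustrative, non-repository variant of the same idea: it renders with the Agg backend and writes each frame to disk instead of waiting for input. The function names run_es/save_frames and the es_frames output folder are assumptions made for this example, not files or APIs from the course code.

import os
import numpy as np
import matplotlib
matplotlib.use("Agg")  # render to files only; no display or button presses needed
import matplotlib.pyplot as plt


def f(x, y):
    # same toy objective as the scripts above: global maximum at (1, 0)
    return -((x - 1)**2 + y**2)


def run_es(pop_size=50, sigma=0.3, alpha=0.03, iterations=30, bounds=(-5, 5)):
    # minimal ES loop mirroring evolution_strategies() above, shortened for the demo
    mu = np.random.uniform(bounds[0], bounds[1], size=2)
    history = []
    for _ in range(iterations):
        noise = np.random.randn(pop_size, 2)
        population = mu + sigma * noise
        fitness = np.array([f(px, py) for px, py in population])
        history.append((population.copy(), mu.copy()))
        fitness_norm = (fitness - fitness.mean()) / (fitness.std() + 1e-8)
        mu = mu + alpha / (pop_size * sigma) * noise.T.dot(fitness_norm)
    return history


def save_frames(history, bounds=(-5, 5), frames_dir="es_frames", resolution=100):
    # write one PNG per ES step instead of waiting for a button press
    os.makedirs(frames_dir, exist_ok=True)
    x = np.linspace(bounds[0], bounds[1], resolution)
    y = np.linspace(bounds[0], bounds[1], resolution)
    X, Y = np.meshgrid(x, y)
    Z = f(X, Y)
    for i, (pop, mu) in enumerate(history):
        plt.figure(figsize=(8, 6))
        plt.contourf(X, Y, Z, levels=50, cmap='viridis')
        plt.colorbar(label="f(x, y)")
        plt.scatter(pop[:, 0], pop[:, 1], c='white', s=20, label='Population')
        plt.scatter(mu[0], mu[1], c='red', s=80, label='Mean', edgecolors='black')
        plt.title(f"Evolution Strategies - Step {i+1}")
        plt.legend()
        plt.savefig(os.path.join(frames_dir, f"step_{i:03d}.png"))
        plt.close()


if __name__ == '__main__':
    save_frames(run_es())

Running it produces step_000.png, step_001.png, ... in the es_frames folder, which can be scrubbed through manually or assembled into a GIF.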